<nowiki>
#
#
#              AUTHORITY.PY -- ADD AUTHORITY TO TAXOBOX
#                           Gdr, 2005-07-05
#
#
# 1. INTRODUCTION
#
# This Python script adds an authority to a selected taxobox on the
# Dutch Wikipedia (see the 'site' setting in section 2.1 below).
#
#
# 1.1 USAGE
#
#    ./authority.py --rebuild                   Rebuild abbreviation table
#    ./authority.py --query=abbrev              Query abbreviation
#    ./authority.py [options] taxon             Find authority and add it to taxon
#    ./authority.py [options] taxon authority   Add authority to taxon
#
#    Options:
#
#    --article=title   Add the authority to this article instead of the
#                      article named after the taxon
#    --noexpand        Insert the authority as given, without expanding
#                      author abbreviations into wikilinks
#
#
# 1.2 LICENCE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.

import getopt
import htmlentitydefs
import os
import pickle
import re
import sys
import unicodedata
import urllib
import wikipedia

class Error(Exception):
    def __init__(self, s):
        wikipedia.output(unicode(s))
        self.s = s

    def __repr__(self):
        return self.s

class Authority:

    # 2. CONFIGURATION

    # 2.1 USER CONFIGURATION

    # Which Wikipedia we are editing.
    site = wikipedia.Site('nl')

    # 'authfile' is the filename in which the tables of author names and
    # abbreviations will be saved.
    authfile = 'authority.dat'

    # A regular expression that matches an authority and abbreviation in
    # a Wikipedia article. (This is the default; you can override it for
    # particular sources; see below.)
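    # For example (illustrative line, not necessarily present in the
    # source articles), auth_re would match
    #     * L. - [[Carolus Linnaeus]]
    # giving 'L.' as the abbreviation group and 'Carolus Linnaeus' as
    # the article group.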
    auth_re = re.compile(ur'^\*[ \']*([\w\'., -]+[\w.])[ \']*'
                         ur' +(?:[-\u2013]|&[nm]dash;) +'
                         ur'\[\[([^\]|]+).*\r?\n', re.M|re.U)

    # 'wiki_abbrev_sources' is a dictionary mapping a code letter to a
    # Wikipedia source for authority abbreviations.  Each source is a
    # dictionary with these keys:
    #
    # name ---- name of the Wikipedia article containing authorities and
    #           their abbreviations
    # re ------ a regular expression matching an authority and its
    #           abbreviation(s). There must be two groups, one for the
    #           abbreviation(s) for that authority and one for the name
    #           of the article about that authority. If omitted, auth_re
    #           is used as the default. Abbreviations are presumed to be
    #           separated by commas.
    # groups -- a tuple giving the group for the abbreviation(s) and the
    #           article; if omitted, (1,2) is the default.
    # fmt ----- format string for a new authority. Use %A for the
    #           abbreviation and %B for the authority.
    # sort ---- How to sort (by 'surname' or by 'abbrev').
    wiki_abbrev_sources = {
        'b': {'name': 'Lijst van biologen naar afkorting',
              'fmt': "* %A - [[%B]]\n",
              'sort': 'abbrev'},
        'z': {'name': 'Lijst van biologen naar afkorting',
              'fmt': "* %A - [[%B]]\n",
              'sort': 'surname'},
        }
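    # With the 'b' source above, for instance, registering the
    # (hypothetical) abbreviation 'Lam.' for the article
    # 'Jean-Baptiste de Lamarck' would insert the line
    #     * Lam. - [[Jean-Baptiste de Lamarck]]
    # into 'Lijst van biologen naar afkorting' at the alphabetical
    # position for 'Lam.'; the 'z' entry sorts by surname instead.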

    # 'other_abbrev_sources' is a list of other (non-Wikipedia) sources
    # for abbreviations. Each entry is a dictionary with keys:
    #
    # taxon --- a regular expression matching a taxon; if present, this
    #           entry is only appropriate for articles contained in taxa
    #           matching the regexp. For example 'Plant' for a source
    #           listing only botanists, or 'Arthropod' for a source
    #           listing only entomologists.
    # re ------ a regular expression matching the abbreviation and its
    #           expansion. %A will be replaced by the regexp-escaped
    #           form of the abbreviation we are looking for.  It should
    #           contain one group, matching the expansion.
    # url ----- the URL to visit to find the abbreviation. %A will be
    #           replaced by the URL-encoded form of the abbreviation we
    #           are looking for.
    other_abbrev_sources = [
        {'taxon': 'Plant',
         'url': 'http://www.ipni.org/ipni/authorsearch?find_abbreviation=%A&query_type=by_query',
         're': r'(?u)>%A</a> - (\w(?:&[a-z]+;|[\w.\' -]+)*(?!\d)\w) *[0-9\n]'},
        {'url': 'http://www.ipni.org/ipni/authorsearch?find_surname=%A&query_type=by_query',
         're': r'(?u)>%A</a> - (\w(?:&[a-z]+;|[\w.\' -]+)*(?!\d)\w) *[0-9\n]'},
        ]
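    # For example, looking up the (hypothetical) abbreviation 'Lam.'
    # with the first source above would fetch
    #     http://www.ipni.org/ipni/authorsearch?find_abbreviation=Lam.&query_type=by_query
    # and search the response for '>Lam.</a> - ' followed by the
    # author's full name.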

    # 'auth_sources' is a list of sources to consult to find the
    # authority for a taxon. Each entry is a dictionary with these keys:
    #
    # taxon --- a regular expression matching a taxon; if present, this
    #           entry is only appropriate for articles contained in taxa
    #           matching the regexp. For example 'Plant' for a source
    #           listing only plant names, or 'Coleoptera' for a source
    #           listing only beetles.
    # url ----- the URL to visit to find the taxon. %T will be replaced
    #           by the URL-encoded form of the taxon we are looking
    #           for, and %S by the SN2000 "subject" area.
    # re ------ a regexp for getting the authority.  %A will be replaced
    #           by the regexp-escaped form of the abbreviation we are
    #           looking for.  It should contain one group, matching the
    #           expansion.
    auth_sources = [
        {'taxon': 'Plant',
         'url': ('http://www.ipni.org/ipni/plantsearch?'
                 'find_wholeName=%T&query_type=by_query'),
         're': r'<i>%T</i> (.*)</a>'},
        {'url': ('http://sn2000.taxonomy.nl/Taxonomicon/TaxonList.aspx?'
                 'searchBy=ScientificName&subject=%S&search=%T'),
         're': r'<i>%T</i>[^<]*<font size="-1"> *(\(?[^<,]+,? +[0-9]+\)?)'},
        ]
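    # For example, for the (hypothetical) species 'Felis silvestris' the
    # second source above would be queried at
    #     http://sn2000.taxonomy.nl/Taxonomicon/TaxonList.aspx?searchBy=ScientificName&subject=Species&search=Felis%20silvestris
    # since the rank 'species' maps to the SN2000 subject 'Species'
    # (see 'rank_to_subject' below).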


    # 2.2 OTHER CONFIGURATION

    # 'rank_to_subject' is a dictionary mapping Linnaean rank in Latin
    # (as used in Wikipedia taxobox template names) to the SN2000
    # "Subject area" in which a taxon can be looked up. Ranks not listed
    # here are looked up in the subject area "High".
    rank_to_subject = {
        'subspecies': 'Species',
        'species': 'Species',
        'subgenus': 'Genus',
        'genus': 'Genus',
        'tribus': 'Family',
        'subfamilia': 'Family',
        'familia': 'Family',
        'superfamilia': 'Family',
        }

    def __init__(self):
        for s in self.wiki_abbrev_sources.values():
            s['page'] = wikipedia.Page(self.site, s['name'])
        self.restore_abbreviations()


    # 3. ABBREVIATIONS
    #
    # We want to be able to find abbreviations and turn them into links
    # to the appropriate article. For example, given the abbreviation
    # 'L.' we need to generate the wikitext '[[Carolus Linnaeus|L.]]'.
    # This section includes the code for finding, storing, and updating
    # these abbreviations.
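    #
    # Internally the table is a dictionary keyed on the normalized form
    # of an abbreviation; each value is a list of (abbreviation,
    # article, source code) tuples, e.g. (hypothetical entry):
    #
    #     self.abbrev['L.'] == [('L.', 'Carolus Linnaeus', 'b')]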


    # 3.1 LOADING AND SAVING ABBREVIATIONS

    # Load abbreviations from disk.
    def restore_abbreviations(self):
        self.abbrev = {}
        if os.path.isfile(self.authfile):
            f = open(self.authfile, 'r')
            self.abbrev = pickle.load(f)
            f.close()

    # Save abbreviations to disk.
    def save_abbreviations(self):
        f = open(self.authfile, 'w')
        pickle.dump(self.abbrev, f)
        f.close()

    def unhtmlify(self, s):
        s = s.decode('iso-8859-1')
        while 1:
            m = re.search(r'&([a-z]+);', s)
            if not m:
                break
            s = (s[:m.start(0)]
                 + unichr(htmlentitydefs.name2codepoint[m.group(1)])
                 + s[m.end(0):])
        return s


    # Normalize the unicode string 's' into ASCII. The idea is to store
    # the authority Lacepede (properly written with acute and grave
    # accents on the e's) under the plain key 'Lacepede' so that
    # inconsistent accentuation doesn't cause us to miss an
    # abbreviation. We decompose all composed characters and then ignore
    # everything non-ASCII. (This converts e-acute -> e, etc.)
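    #
    # A minimal illustration of the intended behaviour:
    #
    #     normalize(u'Lac\xe9p\xe8de')  =>  'Lacepede'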
    def normalize(self, s):
        return unicodedata.normalize('NFD', unicode(s)).encode('ascii', 'ignore')

    # Add an abbreviation to the table. 'abbrev' is the abbreviation;
    # 'article' is the title of the Wikipedia article on that authority;
    # 'code' is the code for the list from which it came, if any.
    def add_abbreviation(self, abbrev, article, code = None):
        key = self.normalize(abbrev)
        if not self.abbrev.has_key(key):
            self.abbrev[key] = []
        for a in self.abbrev[key]:
            # Do we already have this authority under this abbreviation?
            if abbrev == a[0] and article == a[1]:
                return
        self.abbrev[key].append((abbrev, article, code))


    # 3.2 USER INTERFACE FOR ADDING A NEW ABBREVIATION

    # If we don't find an abbreviation in any of wiki_abbrev_sources, we can
    # prompt the user to tell us the article title corresponding to the
    # abbreviation; then we can add it to the relevant source.

    # Return the normalized surname of the abbreviation.
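    # For example (illustrative), surname(u'A. P. de Candolle') would
    # return 'de Candolle': the final name is kept together with an
    # optional 'de', 'von' or "d'" particle.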
    def surname(self, abbrev):
        m = re.search(r'(?ui)(?:de |von |d\')?\w+\.?$',
                      self.normalize(abbrev))
        if m:
            return m.group(0)
        else:
            wikipedia.output(u"No surname for %s" % abbrev)
            return 'a'

    # 'abbrev' is the abbreviation for the authority described at
    # 'article'. Add this to the source given by 'code'.
    def add_abbreviation_to_source(self, abbrev, article, code):
        source = self.wiki_abbrev_sources[code]
        text = source['page'].get()
        if source['sort'] == 'surname':
            sortkey = self.surname(abbrev)
        else:
            sortkey = abbrev
        groups = source.get('groups', (1,2))

        # Format authority for insertion into the source.
        fmt = source['fmt']
        fmt = re.sub('%A', abbrev, fmt)
        # Titles with a disambiguating suffix like 'Foo (botanist)' get
        # the pipe trick so that the link displays as just 'Foo'.
        if article[-1] == ')':
            fmt = re.sub('%B', article + '|', fmt)
        else:
            fmt = re.sub('%B', article, fmt)

        # Go through abbreviations in the source until we get to the
        # appropriate point in alphabetical order by surname.
        for m in re.finditer(source.get('re', self.auth_re), text):
            newtext = None
            if source['sort'] == 'surname':
                s2 = self.surname(m.group(groups[0]))
            else:
                s2 = m.group(groups[0])
            if sortkey[0] != s2[0]:
                # Sort keys not in the same letter of the alphabet.
                continue
            elif sortkey < s2:
                # New abbrev goes before this one.
                newtext = text[:m.start(0)] + fmt + text[m.start(0):]
            elif re.match(r'(?: *\r?\n)*==', text[m.end(0):]):
                # We've reached the end of the section for the right
                # letter, but not found anywhere to put the new
                # abbrev. So it goes at the end.
                newtext = text[:m.end(0)] + fmt + text[m.end(0):]
            else:
                continue
            # Found a place for it.
            wikipedia.showDiff(source['page'].get(), newtext)
            if wikipedia.input(u'OK? [yN]') == 'y':
                source['page'].put(newtext, 'UcuchaBot - voeg auteur toe %s = %s'
                                   % (abbrev, article))
            return
        wikipedia.output(u'Sorry, nowhere to put authority %s' % fmt)

    # 'abbrev' is the abbreviation for the authority described at
    # 'article'. Ask the user which source to add it to.
    def user_add_abbreviation(self, abbrev, article):
        for code, source in self.wiki_abbrev_sources.items():
            wikipedia.output(u'(%s) %s' % (code, source['name']))
        inp = wikipedia.input(u"Add abbreviation %s = %s to which source? [%s]"
                              % (abbrev, article,
                                 ''.join(self.wiki_abbrev_sources.keys())))
        if self.wiki_abbrev_sources.has_key(inp):
            self.add_abbreviation(abbrev, article, inp)
            self.save_abbreviations()
            self.add_abbreviation_to_source(abbrev, article, inp)
        else:
            self.add_abbreviation(abbrev, article)
            self.save_abbreviations()


    # 3.3 FINDING EXPANSIONS FOR ABBREVIATIONS

    # Rebuild table of authorities from the Wikipedia articles listed in
    # 'wiki_abbrev_sources'.
    def rebuild_abbreviations(self):
        wikipedia.getall(self.site,
                         map(lambda l: l['page'], self.wiki_abbrev_sources.values()))
        for code, s in self.wiki_abbrev_sources.items():
            for m in re.finditer(s.get('re', self.auth_re), s['page'].get()):
                groups = s.get('groups', (1,2))
                abbrevs = m.group(groups[0])
                pagename = m.group(groups[1])
                for a in re.split(r', +', abbrevs):
                    self.add_abbreviation(a, pagename, code)
        self.save_abbreviations()

    # User interface for finding an abbreviation using the stored
    # abbrevs, returning the pair (abbrev, expansion) or None.
    def find_abbreviation_in_store(self, abbrev):
        key = self.normalize(abbrev)
        if self.abbrev.has_key(key):
            if len(self.abbrev[key]) == 1:
                return self.abbrev[key][0]
            for i in range(len(self.abbrev[key])):
                wikipedia.output(u'(%d) %s' % (i + 1, self.abbrev[key][i][1]))
            while 1:
                i = wikipedia.input(u"Which authority? [1-%d]"
                                    % len(self.abbrev[key]))
                if (re.match(r'[0-9]+$', i)
                    and int(i) - 1 in range(len(self.abbrev[key]))):
                    break
            return (abbrev, self.abbrev[key][int(i) - 1][1])
        return None

    # Find abbreviation using 'other_abbrev_sources', returning the pair
    # (abbrev, expansion) or None.
    def find_abbreviation_other(self, abbrev):
        # TODO: check source[taxon]
        for source in self.other_abbrev_sources:
            url = re.sub('%A', urllib.quote(abbrev), source['url'])
            wikipedia.output(u'Trying %s' % url)
            f = urllib.urlopen(url)
            r = re.sub('%A', re.escape(abbrev), source['re'])
            m = re.search(r, f.read())
            f.close()
            if m:
                e = self.unhtmlify(m.group(1))
                self.user_add_abbreviation(abbrev, e)
                return (abbrev, e)
        return None

    # User interface for finding abbreviation using Wikipedia, returning
    # its expansion, or None.
    def find_abbreviation_wiki(self, abbrev):
        # See if there's a Wikipedia page for the abbrev.
        pl = wikipedia.Page(self.site, abbrev)
        if not pl.exists():
            expansions = []
        elif pl.isRedirectPage():
            expansions = [wikipedia.Page(self.site, pl.getRedirectTarget())]
        elif pl.isDisambig():
            expansions = pl.linkedPages()
        else:
            expansions = []
        for i in range(len(expansions)):
            wikipedia.output(u'(%d) %s' % (i + 1, expansions[i].linkname()))
        while 1:
            if expansions:
                inp = wikipedia.input(u'Expansion for %s? [1-%d;aeclq]'
                                      % (abbrev, len(expansions)))
            else:
                inp = wikipedia.input(u'Expansion for %s? [aecq]'
                                      % abbrev)
            if inp == 'a':
                abbrev = wikipedia.input(u'Enter new abbrev:')
                return self.find_abbreviation(abbrev)
            elif inp == 'e':
                expansion = wikipedia.input(u'Enter expansion for %s:'
                                            % abbrev)
                self.user_add_abbreviation(abbrev, expansion)
                return (abbrev, expansion)
            elif (re.match(r'[0-9]+$', inp)
                  and int(inp) - 1 in range(len(expansions))):
                expansion = expansions[int(inp) - 1].linkname()
                self.user_add_abbreviation(abbrev, expansion)
                return (abbrev, expansion)
            elif inp == 'c':
                return None
            elif inp == 'q':
                raise Error, "Quit requested"
            elif inp == 'l':
                for i in range(len(expansions)):
                    wikipedia.output(u'(%d) %s' % (i + 1, expansions[i]))
            else:
                wikipedia.output(
                    u'<number> = choose expansion;\n'
                    u'a = enter new abbreviation\n'
                    u'e = enter expansion\n'
                    u'c = continue (with no expansion for abbreviation)\n'
                    u'l = list expansions\n'
                    u'q = quit\n')

    # Find expansion for abbreviation using all available methods,
    # returning the pair (abbrev, expansion) or just abbrev if nothing
    # found.
    def find_abbreviation(self, abbrev):
        if abbrev:
            return (self.find_abbreviation_in_store(abbrev)
                    or self.find_abbreviation_other(abbrev)
                    or self.find_abbreviation_wiki(abbrev)
                    or (abbrev,))
        else:
            return ('',)

    def wikify_abbreviation(self, expansion):
        if 2 <= len(expansion):
            return u'[[%s|%s]]' % (expansion[1], expansion[0])
        else:
            return expansion[0]


    # 4. FINDING THE AUTHORITY FOR A TAXON

    # 'format_authority' takes an 'authority' string and the 'text' of
    # the article in which it is to be inserted. It splits the authority
    # into author names and separators (parentheses, commas, dates,
    # 'in', 'and', 'ex', '&', 'et al.'), looks up each name with
    # 'find_abbreviation', and returns the authority with the names
    # turned into wikilinks. The enclosing {{Taxobox ...}} template is
    # built later by 'format_taxobox_authority'.
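    # For instance, given the authority '(Linnaeus, 1758)' and assuming
    # the abbreviation table maps 'Linnaeus' to the (hypothetical)
    # article 'Carl Linnaeus', this returns
    # '([[Carl Linnaeus|Linnaeus]], 1758)'.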
    def format_authority(self, authority, text):
        r = re.compile(r'^\(|, +[0-9]*| +[0-9]+| +in +| +and +|'
                       r' *\bex\.? +| +& +| +&amp; +|\) *|'
                       r' +et al\.?')
        abbrevs = r.split(authority)
        joins = r.findall(authority)
        expansions = map(self.wikify_abbreviation,
                         map(self.find_abbreviation, abbrevs))
        return reduce(lambda x,y: x+y,
                      map(lambda x: x[0]+x[1], zip(expansions, joins + [''])))

    # 'find_authority' returns the authority for the given taxon. 'text'
    # is the text of the Wikipedia article about that taxon.
    def find_authority(self, taxon, text):
        rank = self.rank_of_taxon(taxon, text)
        subject = self.rank_to_subject.get(rank, 'High')
        for source in self.auth_sources:
            if (source.has_key('taxon') and not
                re.search(r'\| *taxon *= *\[\[%s' % source['taxon'], text)):
                continue
            url = re.sub('%T', urllib.quote(taxon), source['url'])
            url = re.sub('%S', subject, url)
            url = re.sub('%R', rank, url)
            wikipedia.output(u'Trying %s' % url)
            f = urllib.urlopen(url)
            r = re.sub('%T',
                       re.sub(r'\\? +', r'(?: +|</i> +<i>)', re.escape(taxon)),
                       source['re'])
            m = re.search(r, f.read())
            f.close()
            if m:
                return self.unhtmlify(m.group(1))
        wikipedia.output(u'No authority found for %s' % taxon)
        return None


    # 5. UPDATING THE AUTHORITY FOR AN ARTICLE

    kingdom_map = {
        'Plant': 'Plantae',
        'Dieren (rijk)': 'Animalia',
        'Bacterium': 'Bacteria',
        'Fungus': 'Fungi',
        'Protist': 'Protista',
        }

    def kingdom(self, text):
        m = re.search(r'(?mi)^{{Taxobox[ _]+regnum[ _]+entry *\| *'
                      r'taxon *= *\[\[([^\|\]]+)', text)
        if m:
            return self.kingdom_map.get(m.group(1), m.group(1))
        else:
            raise Error, "No kingdom found."

    def rank_of_taxon(self, taxon, text):
        if re.match(r'^[\w-]+ [\w-]+ [\w-]+$', taxon):
            return 'subspecies'
        elif re.match(r'^[\w-]+ [\w-]+$', taxon):
            return 'species'
        m = re.search(r'(?i){{Taxobox[ _]+([a-z]+)[ _]+entry *\| *taxon *= *'
                      r'[ \']*\[*%s[^\w]' % re.escape(taxon), text)
        if not m:
            raise Error, "Can't find taxon %s in taxobox" % taxon
        return m.group(1)

    kingdom_to_color = {
        'Animalia': 'pink',
        'Plantae': 'lightgreen',
        'Fungi': 'lightblue',
        'Archaea': 'darkgray',
        'Protista': 'khaki',
        'Bacteria': 'lightgrey',
        }

    def format_taxobox_authority(self, taxon, rank, kingdom, authority):
        color = self.kingdom_to_color.get(kingdom, 'pink')
        if kingdom == 'Animalia':
            m = re.match(r'^\((.*[^\)])\)?$', authority)
            if m:
                parens = ' parens'
                authority = m.group(1)
            else:
                parens = ''
            m = re.match(r'(.*), +([12][0-9][0-9][0-9])', authority)
            if not m:
                raise Error, "Authority '%s' is missing a date" % authority
            author = m.group(1)
            date = m.group(2)
            if rank == 'species':
                return (u'{{Taxobox section binomial parens%s | '
                        u'color = %s | binomial_name = %s | '
                        u'author = %s | date = %s}}'
                        % (parens, color, taxon, author, date))
            elif rank == 'subspecies':
                return (u'{{Taxobox section trinomial parens%s | '
                        u'color = %s | trinomial_name = %s | '
                        u'author = %s | date = %s}}'
                        % (parens, color, taxon, author, date))
            elif rank == 'genus' or rank == 'subgenus':
                return (u'{{Taxobox %s entry | taxon = '
                        u'\'\'\'\'\'%s\'\'\'\'\'}}<br/>'
                        u'{{Taxobox authority%s | author = %s | date = %s}}'
                        % (rank, taxon, parens, author, date))
            else:
                return (u'{{Taxobox %s entry | taxon = \'\'\'%s\'\'\'}}<br/>'
                        u'{{Taxobox authority%s | author = %s | date = %s}}'
                        % (rank, taxon, parens, author, date))
        elif kingdom in ['Plantae', 'Fungi', 'Bacteria', 'Protista', 'Archaea']:
            if kingdom in ['Bacteria', 'Protista', 'Archaea']:
                authority = re.sub(r',? +([12][0-9][0-9][0-9])', r' \1',
                                   authority)
            elif kingdom in ['Plantae', 'Fungi']:
                authority = re.sub(r',? +([12][0-9][0-9][0-9])', '',
                                   authority)
            if rank == 'species':
                return (u'{{Taxobox section binomial parens | '
                        u'color = %s | binomial_name = %s | '
                        u'author = %s}}' % (color, taxon, authority))
            elif rank == 'subspecies':
                return (u'{{Taxobox section binomial parens | '
                        u'color = %s | binomial_name = %s | '
                        u'author = %s}}' % (color, taxon, authority))
            elif rank == 'genus' or rank == 'subgenus':
                return (u'{{Taxobox %s entry | taxon = '
                        u'\'\'\'\'\'%s\'\'\'\'\'}}<br/>'
                        u'{{Taxobox authority new | authority = %s}}'
                        % (rank, taxon, authority))
            else:
                return (u'{{Taxobox %s entry | taxon = \'\'\'%s\'\'\'}}<br/>'
                        u'{{Taxobox authority new | authority = %s}}'
                        % (rank, taxon, authority))
        else:
            raise Error, "Don't know how to format authority for %s" % kingdom


    # 'find_article' takes the name of an article to start looking at,
    # and returns a Page object.
    def find_article(self, article):
        while 1:
            pl = wikipedia.Page(self.site, article)
            if not pl.exists():
                wikipedia.output(u"No page %s" % pl.linkname())
                i = wikipedia.input(u"Redirect to:")
                if not i:
                    raise Error, "Quit requested"
                pl.put(u"#REDIRECT [[%s]]" % i,
                       u"UcuchaBot - redirect aanmaken: %s to %s"
                       % (article, i))
                article = i
            elif pl.isRedirectPage():
                article = pl.getRedirectTarget()
            elif pl.isDisambig():
                links = pl.linkedPages()
                for i in range(len(links)):
                    wikipedia.output(u'(%d) %s' % (i + 1, links[i]))
                inp = wikipedia.input(u'Choose which article? [1-%d]'
                                      % len(links))
                if (re.match(r'[0-9]+$', inp)
                    and int(inp) - 1 in range(len(links))):
                    article = links[int(inp) - 1].linkname()
                else:
                    raise Error, "Quit requested"
            else:
                return pl

    nomial_re = (r'(?mi)^{{Taxobox[_ ]+section[ _]+(?:b|tr)inomial[a-z _]*\| *'
                 r'color *= *[a-z]+ *\| *(?:b|tr)inomial_name *='
                 r'(?:<center>|\'+| +)*%s[^\}]*}}')

    entry_re = (r'(?mi)^{{Taxobox[_ ]+[a-z]+[ _]+entry *\| *'
                r'taxon *=[ \']*\[*%s\]*[ \']*}} *(?:(?:\n|<br */?> *)'
                r'{{Taxobox[ _]+authority[a-z _]*\| *[^\}]+}})?')
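    # For example (illustrative), 'nomial_re' with %s filled in for
    # 'Felis silvestris' matches a line such as
    #     {{Taxobox section binomial | color = pink | binomial_name = ''Felis silvestris''}}
    # which 'add_authority_to_article' then replaces with the output of
    # 'format_taxobox_authority'.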

    # 'add_authority_to_article' takes a Page object, a taxon and an
    # authority. It adds the authority to that page.
    def add_authority_to_article(self, pl, taxon, authority, expand = True):
        text = pl.get()
        if expand:
            authority = self.format_authority(authority, text)
        rank = self.rank_of_taxon(taxon, text)
        kingdom = self.kingdom(text)
        if rank == 'species' or rank == 'subspecies':
            nomial_re = (self.nomial_re % re.sub(r'\\? +', ' +', re.escape(taxon)))
            m = re.search(nomial_re, text)
        else:
            entry_re = self.entry_re % re.escape(taxon)
            m = re.search(entry_re, text)
        if m:
            text = (text[:m.start(0)]
                    + self.format_taxobox_authority(taxon, rank, kingdom, authority)
                    + text[m.end(0):])
        else:
            raise Error, "Taxon %s not found in %s." % (taxon, pl.linkname())
        wikipedia.showDiff(pl.get(), text)
        if pl.get() != text: #and wikipedia.input(u"OK? [yn]") == 'y':
            pl.put(text, u'UcuchaBot - auteur toegevoegd voor %s %s'
                   % (taxon, authority))

    def add_authority(self, article, taxon, authority, expand = True):
        pl = self.find_article(article)
        self.add_authority_to_article(pl, taxon, authority, expand)

    def find_and_add_authority(self, article, taxon, expand = True):
        pl = self.find_article(article)
        authority = self.find_authority(taxon, pl.get())
        if authority:
            self.add_authority_to_article(pl, taxon, authority, expand)


def badusage():
    raise Error, ('Usage:\n'
                  '%s --rebuild                   Rebuild abbreviation table\n'
                  '%s --query=abbrev              Query abbreviation\n'
                  '%s [options] taxon             Find authority and add it to taxon\n'
                  '%s [options] taxon authority   Add authority to taxon\n'
                  'Options: --article=title, --noexpand\n'
                  % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0]))

def main():
    wikipedia.username = 'UcuchaBot'
    try:
        auth = Authority()
        article = None
        expand = True
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'nra:q:',
                                       ['noexpand', 'rebuild', 'article=',
                                        'query='])
            for o, a in opts:
                if o in ('-q', '--query'):
                    print auth.find_abbreviation(a.decode())
                elif o in ('-r', '--rebuild'):
                    auth.rebuild_abbreviations()
                elif o in ('-a', '--article'):
                    article = a
                elif o in ('-n', '--noexpand'):
                    expand = False
                else:
                    badusage()
                    return
        except getopt.GetoptError:
            badusage()
            return
        if len(args) == 1:
            auth.find_and_add_authority(article or args[0], args[0], expand)
        elif len(args) == 2:
            auth.add_authority(article or args[0], args[0], args[1], expand)
        else:
            badusage()
            return
    except Error:
        return

if __name__ == '__main__':
    try:
        main()
    finally:
        wikipedia.stopme()

</nowiki>