X-Git-Url: https://codewiz.org/gitweb?p=geekigeeki.git;a=blobdiff_plain;f=geekigeeki.py;h=01027ee23b69b7da0a60f32c2925ab4f04dcfb1c;hp=ca16283d5ea2f2c78c573b3863a371add9c15aef;hb=4d8eca8c234926b3770f3fc4d150128ad18cea87;hpb=7ce906e26b7c2c91e232508ccd0af6fe9ec4aeec diff --git a/geekigeeki.py b/geekigeeki.py index ca16283..01027ee 100755 --- a/geekigeeki.py +++ b/geekigeeki.py @@ -1,9 +1,9 @@ -#!/usr/bin/python3.0 +#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 1999, 2000 Martin Pool # Copyright 2002 Gerardo Poggiali -# Copyright 2007, 2008 Bernie Innocenti +# Copyright 2007, 2008, 2009 Bernie Innocenti # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -18,32 +18,39 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -__version__ = '$Id$'[4:12] +__version__ = '4.0-' + '$Id$'[4:11] from time import clock start_time = clock() +title_done = False -import cgi, sys, os, re, errno, stat +import cgi, sys, os, re, errno, stat, glob -# Regular expression defining a WikiWord -# (but this definition is also assumed in other places) -word_re = re.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$") +image_ext = 'png|gif|jpg|jpeg|bmp|ico' +video_ext = "ogg|ogv|oga" # Not supported by Firefox 3.5: mkv|mpg|mpeg|mp4|avi|asf|flv|wmv|qt +image_re = re.compile(r".*\.(" + image_ext + "|" + video_ext + ")", re.IGNORECASE) +video_re = re.compile(r".*\.(" + video_ext + ")", re.IGNORECASE) # FIXME: we accept stuff like foo/../bar and we shouldn't -file_re = re.compile(r"^\b([A-Za-z0-9_\-][A-Za-z0-9_\.\-/]*)\b$") -img_re = re.compile(r"^.*\.(png|gif|jpg|jpeg|bmp|ico)$", re.IGNORECASE) -url_re = re.compile(r"^[a-z]{3,8}://[^\s'\"]+\S$") -link_re = re.compile("(?:\[\[|{{)([^\s\|]+)(?:\s*\|\s*([^\]]+)|)(?:\]\]|}})") - -title_done = False - +file_re = re.compile(r"([A-Za-z0-9_\-][A-Za-z0-9_\.\-/]*)") +url_re = re.compile(r"[a-z]{3,8}://[^\s'\"]+\S") +ext_re = re.compile(r"\.([^\./]+)$") # CGI stuff --------------------------------------------------------- +def config_get(key, default=None): + return globals().get(key, default) def script_name(): return os.environ.get('SCRIPT_NAME', '') +def query_string(): + path_info = os.environ.get('PATH_INFO', '') + if len(path_info) and path_info[0] == '/': + return path_info[1:] or 'FrontPage' + else: + return os.environ.get('QUERY_STRING', '') or 'FrontPage' + def privileged_path(): - return privileged_url or script_name() + return config_get('privileged_url') or script_name() def remote_user(): user = os.environ.get('REMOTE_USER', '') @@ -61,18 +68,45 @@ def get_hostname(addr): except Exception: return addr +def is_external_url(pathname): + return (url_re.match(pathname) or pathname.startswith('/')) + def relative_url(pathname, privileged=False): - if not (url_re.match(pathname) or pathname.startswith('/')): + if not is_external_url(pathname): if privileged: url = privileged_path() else: url = script_name() pathname = url + '/' + pathname - return pathname + return cgi.escape(pathname, quote=True) def permalink(s): return re.sub(' ', '-', re.sub('[^a-z0-9_ ]', '', s.lower()).strip()) +def humanlink(s): + return re.sub(r'(?:.*[/:]|)([^:/\.]+)(?:\.[^/:]+|)$', r'\1', s.replace('_', ' ')) + +# Split arg lists like "blah|blah blah| width=100 | align = center", +# return a list containing anonymous arguments and a map containing the named arguments +def parse_args(s): + args = [] + kvargs = {} + for arg in 
s.strip('<[{}]>').split('|'):
+        m = re.match('\s*(\w+)\s*=\s*(.+)\s*', arg)
+        if m is not None:
+            kvargs[m.group(1)] = m.group(2)
+        else:
+            args.append(arg.strip())
+    return (args, kvargs)
+
+def url_args(kvargs):
+    argv = []
+    for k, v in kvargs.items():
+        argv.append(k + '=' + v)
+    if argv:
+        return '?' + '&'.join(argv)
+    return ''
+
 # Formatting stuff --------------------------------------------------
 def emit_header(mime_type="text/html"):
     print("Content-type: " + mime_type + "; charset=utf-8\n")
@@ -82,32 +116,11 @@ def send_guru(msg_text, msg_type):
     print('
')
     if msg_type == 'error':
         print('    Software Failure.  Press left mouse button to continue.\n')
-    print(msg_text)
+    print(cgi.escape(msg_text))
     if msg_type == 'error':
-        print('\n      Guru Meditation #DEADBEEF.ABADC0DE')
-    print('
') - # FIXME: This little JS snippet is harder to pass than ACID 3.0 - print(""" - """) + print '\n Guru Meditation #DEADBEEF.ABADC0DE' + print('' \ + % relative_url('sys/GuruMeditation.js')) def send_title(name, text="Limbo", msg_text=None, msg_type='error', writable=False): global title_done @@ -119,32 +132,33 @@ def send_title(name, text="Limbo", msg_text=None, msg_type='error', writable=Fal print(' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">') print('') - print("%s: %s" % (site_name, text)) + print("%s: %s" % (config_get('site_name', "Unconfigured Wiki"), text)) print(' ') if not name: print(' ') - for meta in meta_urls: - http_equiv, content = meta + for http_equiv, content in config_get('meta_urls', {}): print(' ' % (http_equiv, relative_url(content))) - for link in link_urls: + for link in config_get('link_urls', {}): rel, href = link print(' ' % (rel, relative_url(href))) - if name and writable and privileged_url is not None: + editable = name and writable and config_get('privileged_url') is not None + if editable: print(' ' \ - % (privileged_path() + '?edit=' + name)) + % (privileged_path() + '?a=edit&q=' + name)) - if history_url is not None: + history = config_get('history_url') + if history is not None: print(' ' \ - % relative_url(history_url + '?a=rss')) + % relative_url(history + '?a=rss')) print('') # Body - if name and writable and privileged_url is not None: - print('') + if editable: + print('') else: print('') @@ -153,31 +167,31 @@ def send_title(name, text="Limbo", msg_text=None, msg_type='error', writable=Fal # Navbar print('') @@ -186,23 +200,56 @@ def send_httperror(status="403 Not Found", query=""): send_title(None, msg_text=("%s: on query '%s'" % (status, query))) send_footer() -def link_tag(params, text=None, ss_class=None, privileged=False): +def link_tag(dest, text=None, privileged=False, **kvargs): if text is None: - text = params # default - classattr = '' - if ss_class: - classattr += 'class="%s" ' % ss_class - # Prevent crawlers from following links potentially added by spammers or to generated pages - if ss_class == 'external' or ss_class == 'navlink': - classattr += 'rel="nofollow" ' - elif url_re.match(params): - classattr += 'rel="nofollow" ' - return '%s' % (classattr, relative_url(params, privileged=privileged), text) + text = humanlink(dest) + elif image_re.match(text): + text = '' + text + '' + + link_class = kvargs.get('class', kvargs.get('cssclass', None)) + if not link_class: + if is_external_url(dest): + link_class = 'external' + elif file_re.match(dest) and Page(dest).exists(): + link_class = 'wikilink' + else: + text = config_get('nonexist_pfx', '') + text + link_class = 'nonexistent' + + # Prevent crawlers from following links potentially added by spammers or to generated pages + nofollow = '' + if link_class == 'external' or link_class == 'navlink': + nofollow = 'rel="nofollow" ' + + return '%s' % (link_class, nofollow, relative_url(dest, privileged=privileged), text) + +def link_inline(name, descr=None, kvargs={}): + if not descr: descr = humanlink(name) + url = relative_url(name) + if video_re.match(name): + return '' % url + elif image_re.match(name): + return '%s' % (url, url + url_args(kvargs), descr) + elif file_re.match(name) and not ext_re.search(name): # FIXME: this guesses a wiki page + return Page(name).send_naked(kvargs) + else: + return '' \ + % (url, url, name) + +def link_inline_glob(pattern, descr=None, kvargs={}): + s = '' + for name in glob.glob(pattern): + s += link_inline(name, descr, kvargs) + return s # 
Search ---------------------------------------------------
-def handle_fullsearch(needle):
-    send_title(None, 'Full text search for "%s"' % (needle))
+def print_search_stats(hits, searched):
+    print("<p>%d hits out of %d pages searched.</p>" % (hits, searched))
+
+def handle_fullsearch(query, form):
+    needle = form['q'].value
+    send_title(None, 'Full text search for "' + needle + '"')
 
     needle_re = re.compile(needle, re.IGNORECASE)
     hits = []
@@ -220,17 +267,17 @@ def handle_fullsearch(needle):
     print("
    ") for (count, page_name) in hits: - print('
  • ' + Page(page_name).link_to()) - print(' . . . . ' + repr(count)) + print('

  • ' + link_tag(page_name)) + print(' . . . . ' + `count`) print(['match', 'matches'][count != 1]) print('

  • ') print("
") print_search_stats(len(hits), len(all_pages)) -def handle_titlesearch(needle): - # TODO: check needle is legal -- but probably we can just accept any RE - send_title(None, "Title search for \"" + needle + '"') +def handle_titlesearch(query, form): + needle = form['q'].value + send_title(None, 'Title search for "' + needle + '"') needle_re = re.compile(needle, re.IGNORECASE) all_pages = page_list() @@ -238,27 +285,24 @@ def handle_titlesearch(needle): print("
    ") for filename in hits: - print('
  • ' + Page(filename).link_to() + "

  • ") + print('
  • ' + link_tag(filename) + "

  • ") print("
") print_search_stats(len(hits), len(all_pages)) -def print_search_stats(hits, searched): - print("

%d hits out of %d pages searched.

" % (hits, searched)) - -def handle_raw(pagename): +def handle_raw(pagename, form): if not file_re.match(pagename): send_httperror("403 Forbidden", pagename) return Page(pagename).send_raw() -def handle_edit(pagename): +def handle_edit(pagename, form): if not file_re.match(pagename): send_httperror("403 Forbidden", pagename) return - pg = Page(pagename) + pg = Page(form['q'].value) if 'save' in form: if form['file'].value: pg.save(form['file'].file.read(), form['changelog'].value) @@ -275,28 +319,42 @@ def handle_edit(pagename): text = form['savetext'].value pg.send_editor(text) +def handle_get(pagename, form): + if file_re.match(pagename): + # FIMXE: this is all bullshit, MimeTypes bases its guess on the extension! + from mimetypes import MimeTypes + mimetype, encoding = MimeTypes().guess_type(pagename) + if mimetype: + Page(pagename).send_raw(mimetype=mimetype, args=form) + else: + Page(pagename).format() + else: + send_httperror("403 Forbidden", pagename) + +# Used by sys/macros/WordIndex and sys/macros/TitleIndex def make_index_key(): links = ['%s' % (ch, ch) for ch in 'abcdefghijklmnopqrstuvwxyz'] - return '

'+ ' | '.join(links) + '

' + return '

' + ' | '.join(links) + '

' -def page_list(dirname = None, re = word_re): - return sorted(filter(re.match, os.listdir(dirname or data_dir))) +def page_list(dirname=None, search_re=None): + if search_re is None: + # FIXME: WikiWord is too restrictive now! + search_re = re.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$") + return sorted(filter(search_re.match, os.listdir(dirname or '.'))) def send_footer(mod_string=None): - if globals().get('debug_cgi', False): + if config_get('debug_cgi', False): cgi.print_arguments() cgi.print_form(form) cgi.print_environ() - print(''' -') + link_inline("sys/footer", kvargs = { 'LAST_MODIFIED': mod_string }) + print("") + +def _macro_ELAPSED_TIME(*args, **kvargs): + return "%03f" % (clock() - start_time) + +def _macro_VERSION(*args, **kvargs): + return __version__ class WikiFormatter: """Object that turns Wiki markup into HTML. @@ -304,8 +362,9 @@ class WikiFormatter: All formatting commands can be parsed one line at a time, though some state is carried over between lines. """ - def __init__(self, raw): + def __init__(self, raw, kvargs=None): self.raw = raw + self.kvargs = kvargs or {} self.h_level = 0 self.in_pre = self.in_html = self.in_table = self.in_li = False self.in_header = True @@ -314,14 +373,15 @@ class WikiFormatter: self.styles = { #wiki html enabled? "//": ["em", False], - "''": ["em", False], "**": ["b", False], - "'''": ["b", False], "##": ["tt", False], - "``": ["tt", False], "__": ["u", False], + "--": ["del", False], "^^": ["sup", False], - ",,": ["sub", False] + ",,": ["sub", False], + "''": ["em", False], # LEGACY + "'''": ["b", False], # LEGACY + "``": ["tt", False], # LEGACY } def _b_repl(self, word): @@ -329,6 +389,9 @@ class WikiFormatter: style[1] = not style[1] return ['' + def _glyph_repl(self, word): + return '—' + def _tit_repl(self, word): if self.h_level: result = '

\n' % self.h_level @@ -345,85 +408,71 @@ class WikiFormatter: def _rule_repl(self, word): return self._undent() + '\n


\n' % (len(word) - 2) - def _word_repl(self, word): - return Page(word).link_to() - - def _img_repl(self, word): - pathname = relative_url(word) - return '' % (pathname, pathname) - - def _url_repl(self, word): - if img_re.match(word): - return '' % (word, word) - else: - return '%s' % (word, word) - def _macro_repl(self, word): - m = re.compile("\<\<([^\s\|\>]+)(?:\s*\|\s*([^\>]+)|)\>\>").match(word) - name = m.group(1) - argv = [name] - if m.group(2): - argv.extend(m.group(2).split('|')) - argv = list(map(str.strip, argv)) - - macro = globals().get('_macro_' + name) - if not macro: - try: - exec(open("macros/" + name + ".py").read(), globals()) - except IOError as err: - if err.errno == errno.ENOENT: pass - macro = globals().get('_macro_' + name) - if macro: - return macro(argv) - else: - return '<<' + '|'.join(argv) + '>>' + try: + args, kvargs = parse_args(word) + if args[0] in self.kvargs: + return self.kvargs[args[0]] + macro = globals().get('_macro_' + args[0]) + if not macro: + exec(open("sys/macros/" + args[0] + ".py").read(), globals()) + macro = globals().get('_macro_' + args[0]) + return macro(*args, **kvargs) + except Exception, e: + msg = cgi.escape(word) + ": " + cgi.escape(e.message) + if not self.in_html: + msg = '' + msg + '' + return msg def _hurl_repl(self, word): - m = link_re.match(word) - name = m.group(1) - descr = m.group(2) - if descr is None: - descr = name - elif img_re.match(m.group(2)): - descr = '' - - return link_tag(name, descr, 'wikilink') + args, kvargs = parse_args(word) + return link_tag(*args, **kvargs) def _inl_repl(self, word): - m = link_re.match(word) - name = m.group(1) - descr = m.group(2) or name - name = relative_url(name) - argv = descr.split('|') - descr = argv.pop(0) - - if argv: - args = '?' + '&'.join(argv) - else: - args = '' - - if descr: - # The "extthumb" nonsense works around a limitation of the HTML block model - return '
%s
%s
' \ - % (name, name + args, descr, descr) + args, kvargs = parse_args(word) + name = args.pop(0) + if len(args): + descr = args.pop(0) + # This double div nonsense works around a limitation of the HTML block model + return '
' \ + + '
' \ + + link_inline_glob(name, descr, kvargs) \ + + '
' + descr + '
' else: - return '' % (name, name + args) - - def _email_repl(self, word): - return '%s' % (word, word) + return link_inline_glob(name, None, kvargs) def _html_repl(self, word): + if not self.in_html and word.startswith('': - self.in_html -= 1 - return '>' + if self.in_html: + return s; # Pass through return {'&': '&', '<': '<', '>': '>'}[s] + def _img_repl(self, word): # LEGACY + return self._inl_repl('{{' + word + '}}') + + def _word_repl(self, word): # LEGACY + if self.in_html: return word # pass through + return link_tag(word) + + def _url_repl(self, word): # LEGACY + if self.in_html: return word # pass through + return link_tag(word) + + def _email_repl(self, word): # LEGACY + if self.in_html: return word # pass through + return '%s' % (word, word) + def _li_repl(self, match): if self.in_li: return '
  • ' @@ -494,56 +543,57 @@ class WikiFormatter: if hit: return getattr(self, '_' + rule + '_repl')(hit) else: - raise "Can't handle match " + repr(match) + raise Exception("Can't handle match " + repr(match)) def print_html(self): print('

    ') - # For each line, we scan through looking for magic - # strings, outputting verbatim any intervening text - # TODO: highlight search words (look at referrer) - scan_re = re.compile( - r"(?:" - # Formatting - + r"(?P\*\*|'''|//|''|##|``|__|\^\^|,,)" - + r"|(?P\={2,6})" - + r"|(?P
    \\\\)" - + r"|(?P^-{3,})" - + r"|(?P\b(FIXME|TODO|DONE)\b)" + scan_re = re.compile(r"""(?: + # Styles and formatting ("--" must cling to a word to disambiguate it from the dash) + (?P \*\* | // | \#\# | __ | --\b | \b-- | \^\^ | ,, | ''' | '' | `` ) + | (?P \={2,6}) + | (?P
    \\\\) + | (?P ^-{3,}) + | (?P \b( FIXME | TODO | DONE )\b ) + | (?P --) # Links - + r"|(?P\<\<([^\s\|\>]+)(?:\s*\|\s*([^\>]+)|)\>\>)" - + r"|(?P\[\[([^\s\|]+)(?:\s*\|\s*([^\]]+)|)\]\])" + | (?P \<\<([^\s\|\>]+)(?:\s*\|\s*([^\>]+)|)\>\>) + | (?P \[\[([^\s\|]+)(?:\s*\|\s*([^\]]+)|)\]\]) # Inline HTML - + r"|(?P<(/|)(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])[^>]*>)" - + r"|(?P[<>&])" + | (?P <(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])\b ) + | (?P ( /\s*> | ) ) + | (?P [<>&] ) # Auto links (LEGACY) - + r"|(?P\b[a-zA-Z0-9_/-]+\.(png|gif|jpg|jpeg|bmp|ico))" - + r"|(?P\b(?:[A-Z][a-z]+){2,}\b)" - + r"|(?P(http|https|ftp|mailto)\:[^\s'\"]+\S)" - + r"|(?P[-\w._+]+\@[\w.-]+)" + | (?P \b[a-zA-Z0-9_/-]+\.(""" + image_ext + "|" + video_ext + r""")) + | (?P \b(?:[A-Z][a-z]+){2,}\b) + | (?P (http|https|ftp|mailto)\:[^\s'\"]+\S) + | (?P [-\w._+]+\@[\w.-]+) - # Lists, divs, spans - + r"|(?P

  • ^\s+[\*#] +)" - + r"|(?P
    \{\{\{|\s*\}\}\})"
    -            + r"|(?P\{\{([^\s\|]+)(?:\s*\|\s*([^\]]+)|)\}\})"
    +            # Lists, divs, spans and inline objects
+            | (?P<li>    ^\s+[\*\#]\s+)
+            | (?P<pre>   \{\{\{|\s*\}\}\})
+            | (?P<inl>   \{\{([^\s\|]+)(?:\s*\|\s*([^\]]+)|)\}\})
     
                 # Tables
    -            + r"|(?P^\s*\|\|(=|)\s*)"
    -            + r"|(?P\s*\|\|(=|)\s*$)"
    -            + r"|(?P\s*\|\|(=|)\s*)"
    -            + r")")
-        pre_re = re.compile(
-            r"(?:"
-            + r"(?P<pre>\s*\}\}\})"
-            + r"|(?P<ent>[<>&])"
-            + r")")
    +            | (?P    ^\s*\|\|(=|)\s*)
    +            | (?P   \s*\|\|(=|)\s*$)
    +            | (?P    \s*\|\|(=|)\s*)
    +
    +            # TODO: highlight search words (look at referrer)
    +          )""", re.VERBOSE)
+        pre_re = re.compile("""(?:
+              (?P<pre>\s*\}\}\})
+            | (?P<ent>[<>&])"
+            )""", re.VERBOSE)
             blank_re = re.compile(r"^\s*$")
             indent_re = re.compile(r"^\s*")
             tr_re = re.compile(r"^\s*\|\|")
             eol_re = re.compile(r"\r?\n")
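The four small patterns above support the line-by-line pass that follows: eol_re splits the raw page text, indent_re measures leading whitespace, and blank_re/tr_re classify each line. Roughly, as a sketch under those assumptions rather than the committed loop:

    import re

    blank_re  = re.compile(r"^\s*$")
    indent_re = re.compile(r"^\s*")
    tr_re     = re.compile(r"^\s*\|\|")
    eol_re    = re.compile(r"\r?\n")

    raw = " * a list item\n||one||two||\n\nplain text\n"
    for line in eol_re.split(raw.expandtabs()):
        depth = len(indent_re.match(line).group(0))
        if blank_re.match(line):
            kind = 'blank'
        elif tr_re.match(line):
            kind = 'table row'
        else:
            kind = 'text'
        print("%-9s depth=%d %r" % (kind, depth, line))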
    +        # For each line, we scan through looking for magic strings, outputting verbatim any intervening text
    +        #3.0: for self.line in eol_re.split(str(self.raw.expandtabs(), 'utf-8')):
             for self.line in eol_re.split(str(self.raw.expandtabs())):
                 # Skip pragmas
                 if self.in_header:
    @@ -562,7 +612,8 @@ class WikiFormatter:
                         print('

    ')
                     else:
                         indent = indent_re.match(self.line)
-                        print(self._indent_to(len(indent.group(0))), end=' ')
+                        #3.0: print(self._indent_to(len(indent.group(0))), end=' ')
+                        print(self._indent_to(len(indent.group(0))))
                     print(re.sub(scan_re, self.replace, self.line))
 
         if self.in_pre: print('

    ') @@ -581,33 +632,28 @@ class Page: return re.sub('([a-z])([A-Z])', r'\1 \2', self.page_name) def _filename(self): - return os.path.join(data_dir, self.page_name) + return self.page_name def _tmp_filename(self): - return os.path.join(data_dir, ('#' + self.page_name.replace('/','_') + '.' + repr(os.getpid()) + '#')) + return self.page_name + '.tmp' + str(os.getpid()) + '#' def exists(self): try: os.stat(self._filename()) return True - except OSError as err: + except OSError, err: if err.errno == errno.ENOENT: return False raise err - def link_to(self): - word = self.page_name - if self.exists(): - return link_tag(word, word, 'wikilink') - else: - return link_tag(word, nonexist_pfx + word, 'nonexistent') - - def get_raw_body(self): + def get_raw_body(self, default=None): try: return open(self._filename(), 'rb').read() - except IOError as err: + except IOError, err: if err.errno == errno.ENOENT: - return '' # just doesn't exist, use default + if default is None: + default = '//[[?a=edit&q=%s|Describe %s]]//' % (self.page_name, self.page_name) + return default if err.errno == errno.EISDIR: return self.format_dir() raise err @@ -615,22 +661,24 @@ class Page: def format_dir(self): out = '== ' pathname = '' - for dirname in self.page_name.split('/'): + for dirname in self.page_name.strip('/').split('/'): pathname = (pathname + '/' + dirname) if pathname else dirname out += '[[' + pathname + '|' + dirname + ']]/' out += ' ==\n' for filename in page_list(self._filename(), file_re): - if img_re.match(filename): - if image_maxwidth: - maxwidth_arg = '|maxwidth=' + str(image_maxwidth) - out += '{{' + self.page_name + '/' + filename + '|' + filename + maxwidth_arg + '}}\n' + if image_re.match(filename): + maxwidth = config_get(image_maxwidth, '') + if maxwidth: + maxwidth = ' | maxwidth=' + str(maxwidth) + out += '{{' + self.page_name + '/' + filename + ' | ' + humanlink(filename) + maxwidth + ' | class=thumbleft}}\n' else: out += ' * [[' + self.page_name + '/' + filename + ']]\n' return out def pragmas(self): if not '_pragmas' in self.__dict__: + self._pragmas = {} try: file = open(self._filename(), 'rt') attr_re = re.compile(r"^#(\S*)(.*)$") @@ -640,7 +688,7 @@ class Page: break self._pragmas[m.group(1)] = m.group(2).strip() #print "bernie: pragmas[" + m.group(1) + "] = " + m.group(2) + "
    \n" - except IOError as err: + except IOError, err: if err.errno != errno.ENOENT and err.errno != errno.EISDIR: raise er return self._pragmas @@ -669,9 +717,9 @@ class Page: def can_read(self): return self.can("read", True) - def send_naked(self): + def send_naked(self, kvargs=None): if self.can_read(): - WikiFormatter(self.get_raw_body()).print_html() + WikiFormatter(self.get_raw_body(), kvargs).print_html() else: send_guru("Read access denied by ACLs", "notice") @@ -691,11 +739,11 @@ class Page: try: from time import localtime, strftime modtime = localtime(os.stat(self._filename())[stat.ST_MTIME]) - except OSError as err: + except OSError, err: if err.errno != errno.ENOENT: raise err return None - return strftime(datetime_fmt, modtime) + return strftime(config_get(datetime_fmt, '%a %d %b %Y %I:%M %p'), modtime) def send_editor(self, preview=None): send_title(None, 'Edit ' + self.split_title(), msg_text=self.msg_text, msg_type=self.msg_type) @@ -703,49 +751,25 @@ class Page: send_guru("Write access denied by ACLs", "error") return - filename = '' - if 'file' in form: - filename = form['file'].value - - print(('

    Editing ' + self.page_name - + ' for ' + cgi.escape(remote_user()) - + ' from ' + cgi.escape(get_hostname(remote_host())) - + '

    ')) - print('
    ' % relative_url(self.page_name)) - print('' % (self.page_name)) - print('
    ' % (self.page_name)) - print('' % cgi.escape(preview or self.get_raw_body())) - print(' ' % filename) - print(""" -
    - - - - -
    -
    - - """) - print("

    " + Page('EditingTips').link_to() + "

    ") + if preview is None: + preview = self.get_raw_body(default='') + + link_inline("sys/EditPage", kvargs = { + 'EDIT_BODY': cgi.escape(preview), + #'EDIT_PREVIEW': WikiFormatter(preview).print_html(), + }) + if preview: print("
    ") WikiFormatter(preview).print_html() print("
    ") send_footer() - def send_raw(self, mimetype='text/plain'): - if self.can_read(): - body = self.get_raw_body() - emit_header(mimetype) - print(body) - else: + def send_raw(self, mimetype='text/plain', args=[]): + if not self.can_read(): send_title(None, msg_text='Read access denied by ACLs', msg_type='notice') + return - def send_image(self, mimetype, args=[]): if 'maxwidth' in args: import subprocess emit_header(mimetype) @@ -753,7 +777,9 @@ class Page: subprocess.check_call(['gm', 'convert', self._filename(), '-scale', args['maxwidth'].value + ' >', '-']) else: - self.send_raw(mimetype) + body = self.get_raw_body() + emit_header(mimetype) + print(body) def _write_file(self, data): tmp_filename = self._tmp_filename() @@ -763,8 +789,11 @@ class Page: # Bad Bill! POSIX rename ought to replace. :-( try: os.remove(name) - except OSError as err: + except OSError, err: if err.errno != errno.ENOENT: raise err + path = os.path.split(name)[0] + if not os.path.exists(path): + os.makedirs(path) os.rename(tmp_filename, name) def save(self, newdata, changelog): @@ -775,59 +804,34 @@ class Page: self._write_file(newdata) rc = 0 - if post_edit_hook: - # FIXME: what's the std way to perform shell quoting in python? - cmd = ( post_edit_hook - + " '" + data_dir + '/' + self.page_name - + "' '" + remote_user() - + "' '" + remote_host() - + "' '" + changelog + "'" - ) - out = os.popen(cmd) - output = out.read() - rc = out.close() + if config_get('post_edit_hook'): + import subprocess + cmd = [ + config_get('post_edit_hook'), + self.page_name, remote_user(), + remote_host(), changelog ] + child = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True) + output = child.stdout.read() + rc = child.wait() if rc: - self.msg_text += "Post-editing hook returned %d.\n" % rc - self.msg_text += 'Command was: ' + cmd + '\n' + self.msg_text += "Post-editing hook returned %d. Command was:\n'%s'\n" % (rc, "' '".join(cmd)) if output: self.msg_text += 'Output follows:\n' + output else: self.msg_text = 'Thank you for your contribution. Your attention to detail is appreciated.' self.msg_type = 'success' -def main(): - for cmd in form: - handler = globals().get('handle_' + cmd) - if handler: - handler(form[cmd].value) - break - else: - path_info = os.environ.get('PATH_INFO', '') - if len(path_info) and path_info[0] == '/': - query = path_info[1:] or 'FrontPage' - else: - query = os.environ.get('QUERY_STRING', '') or 'FrontPage' - - if file_re.match(query): - if word_re.match(query): - Page(query).format() - else: - from mimetypes import MimeTypes - mimetype, encoding = MimeTypes().guess_type(query) - if mimetype: - if mimetype.startswith('image/'): - Page(query).send_image(mimetype=mimetype, args=form) - else: - Page(query).send_raw(mimetype=mimetype) - else: - Page(query).format() - else: - send_httperror("403 Forbidden", query) - try: exec(open("geekigeeki.conf.py").read()) + os.chdir(config_get('data_dir', 'data')) form = cgi.FieldStorage() - main() + action = form.getvalue('a', 'get') + handler = globals().get('handle_' + action) + if handler: + handler(query_string(), form) + else: + send_httperror("403 Forbidden", query_string()) + except Exception: import traceback msg_text = traceback.format_exc()