2 # -*- coding: utf-8 -*-
4 # Copyright (C) 1999, 2000 Martin Pool <mbp@humbug.org.au>
5 # Copyright (C) 2002 Gerardo Poggiali
6 # Copyright (C) 2007, 2008, 2009, 2010 Bernie Innocenti <bernie@codewiz.org>
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as
10 # published by the Free Software Foundation, either version 3 of the
11 # License, or (at your option) any later version.
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 __version__ = '4.0-' + '$Id$'[4:11]
17 from time import clock, localtime, gmtime, strftime
21 import cgi, sys, os, re, errno, stat, glob
23 image_ext = 'png|gif|jpg|jpeg|bmp|ico'
24 video_ext = "ogg|ogv|oga" # Not supported by Firefox 3.5: mkv|mpg|mpeg|mp4|avi|asf|flv|wmv|qt
25 image_re = re.compile(r".*\.(" + image_ext + "|" + video_ext + ")$", re.IGNORECASE)
26 video_re = re.compile(r".*\.(" + video_ext + ")$", re.IGNORECASE)
27 # FIXME: we accept stuff like foo/../bar and we shouldn't
28 file_re = re.compile(r"([A-Za-z0-9_\-][A-Za-z0-9_\.\-/ ]*)$")
29 url_re = re.compile(r"[a-z]{3,8}://[^\s'\"]+\S$")
30 ext_re = re.compile(r"\.([^\./]+)$")
def config_get(key, default=None):
    """Look up a configuration setting by name, with a fallback.

    The config file is exec'd into this module's globals, so settings
    live in the module namespace; absent keys yield *default*.
    """
    try:
        return globals()[key]
    except KeyError:
        return default
36 return os.environ.get('SCRIPT_NAME', '')
38 #TODO: move post-edit hook into wiki, then kill this
40 return os.path.split(os.environ.get('SCRIPT_FILENAME', ''))[0]
43 path_info = os.environ.get('PATH_INFO', '')
44 if len(path_info) and path_info[0] == '/':
45 return path_info[1:] or 'FrontPage'
47 return os.environ.get('QUERY_STRING', '') or 'FrontPage'
50 purl = config_get('privileged_url')
51 return (purl is not None) and os.environ.get('SCRIPT_URI', '').startswith(purl)
54 user = os.environ.get('REMOTE_USER', '')
55 if user is None or user == '' or user == 'anonymous':
56 user = 'AnonymousCoward'
60 return os.environ.get('REMOTE_ADDR', '')
62 def get_hostname(addr):
64 from socket import gethostbyaddr
65 return gethostbyaddr(addr)[0] + ' (' + addr + ')'
def is_external_url(pathname):
    """Truthy when *pathname* needs no wiki prefix: a full URL or a rooted path."""
    match = url_re.match(pathname)
    return match or pathname.startswith('/')
72 def relative_url(pathname, privileged=False):
73 if not is_external_url(pathname):
75 url = config_get('privileged_url') or script_name()
78 pathname = url + '/' + pathname
79 return cgi.escape(pathname, quote=True)
82 return re.sub(' ', '-', re.sub('[^a-z0-9_ ]', '', s.lower()).strip())
85 return re.sub(r'(?:.*[/:]|)([^:/\.]+)(?:\.[^/:]+|)$', r'\1', s.replace('_', ' '))
87 # Split arg lists like "blah|blah blah| width=100 | align = center",
88 # return a list containing anonymous arguments and a map containing the named arguments
92 for arg in s.strip('<[{}]>').split('|'):
93 m = re.match('\s*(\w+)\s*=\s*(.+)\s*', arg)
95 kvargs[m.group(1)] = m.group(2)
97 args.append(arg.strip())
100 def url_args(kvargs):
102 for k, v in kvargs.items():
103 argv.append(k + '=' + v)
105 return '?' + '&'.join(argv)
108 def emit_header(mtime=None, mime_type="text/html"):
110 print("Last-Modified: " + strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime(mtime)))
112 print("Content-type: " + mime_type + "; charset=utf-8")
def send_guru(msg_text, msg_type):
    """Emit an Amiga-style "Guru Meditation" message box as HTML.

    msg_text: message to show (HTML-escaped); nothing is printed when empty.
    msg_type: CSS class for the box; 'error' additionally wraps the message
    in the classic Software Failure / Guru Meditation framing lines.
    """
    if not msg_text:
        return
    print('<pre id="guru" onclick="this.style.display = \'none\'" class="' + msg_type + '">')
    if msg_type == 'error':
        print(' Software Failure. Press left mouse button to continue.\n')
    print(cgi.escape(msg_text))
    if msg_type == 'error':
        # FIX: this line was a Python 2 print STATEMENT ("print '...'"),
        # a SyntaxError on Python 3, which the rest of the file targets.
        print('\n Guru Meditation #DEADBEEF.ABADC0DE')
    # Clicking the box hides it; the deferred script adds the visual effect.
    print('</pre><script type="text/javascript" src="%s" defer="defer"></script>'
          % relative_url('sys/GuruMeditation.js'))
126 def send_httperror(status="403 Not Found", query="", trace=False):
127 print("Status: %s" % status)
128 msg_text = "%s: on query '%s'" % (status, query)
131 msg_text += '\n\n' + traceback.format_exc()
133 page.send_title(msg_text=msg_text)
136 def link_tag(dest, text=None, privileged=False, **kvargs):
138 text = humanlink(dest)
139 elif image_re.match(text):
140 text = '<img style="border: 0" src="' + relative_url(text) + '" alt="' + text + '" />'
142 link_class = kvargs.get('class', kvargs.get('cssclass', None))
144 if is_external_url(dest):
145 link_class = 'external'
146 elif file_re.match(dest) and Page(dest).exists():
147 link_class = 'wikilink'
149 text = config_get('nonexist_pfx', '') + text
150 link_class = 'nonexistent'
152 # Prevent crawlers from following links potentially added by spammers or to generated pages
154 if link_class == 'external' or link_class == 'navlink':
155 nofollow = 'rel="nofollow" '
157 return '<a class="%s" %shref="%s">%s</a>' % (link_class, nofollow, relative_url(dest, privileged=privileged), text)
159 def link_inline(name, descr=None, kvargs={}):
160 if not descr: descr = humanlink(name)
161 url = relative_url(name)
162 if video_re.match(name):
163 return '<video controls="1" src="%s">Your browser does not support HTML5 video</video>' % url
164 elif image_re.match(name):
165 return '<a href="%s"><img border="0" src="%s" alt="%s" /></a>' % (url, url + url_args(kvargs), descr)
166 elif file_re.match(name) and not ext_re.search(name): # FIXME: this guesses a wiki page
167 Page(name).send_naked(kvargs) # FIXME: we should return the page as a string rather than print it
170 return '<iframe width="100%%" scrolling="auto" frameborder="0" src="%s"><a href="%s">%s</a></iframe>' \
173 def link_inline_glob(pattern, descr=None, kvargs={}):
174 if not url_re.match(pattern) and bool(set(pattern) & set('?*[')):
176 for name in glob.glob(pattern):
177 s += link_inline(name, descr, kvargs)
180 return link_inline(pattern, descr, kvargs)
def search_stats(hits, searched):
    """Return the one-line summary shown after a search run."""
    summary = "%d hits out of %d pages searched.\n" % (hits, searched)
    return summary
185 def handle_fullsearch(query, form):
186 needle = form['q'].value
187 Page().send_title(text='Full text search for "' + needle + '"')
189 needle_re = re.compile(needle, re.IGNORECASE)
191 all_pages = page_list()
192 for page_name in all_pages:
193 body = Page(page_name).get_raw_body()
194 count = len(needle_re.findall(body))
196 hits.append((count, page_name))
198 # The default comparison for tuples compares elements in order, so this sorts by number of hits
203 for (count, page_name) in hits:
204 out += ' * [[' + page_name + ']] . . . ' + str(count) + ' ' + ['match', 'matches'][count != 1] + '\n'
206 out += search_stats(len(hits), len(all_pages))
207 WikiFormatter(out).print_html()
209 def handle_titlesearch(query, form):
210 needle = form['q'].value
211 Page().send_title(text='Title search for "' + needle + '"')
213 needle_re = re.compile(needle, re.IGNORECASE)
214 all_pages = page_list()
215 hits = list(filter(needle_re.search, all_pages))
218 for filename in hits:
219 out += ' * [[' + filename + ']]\n'
221 out += search_stats(len(hits), len(all_pages))
222 WikiFormatter(out).print_html()
def handle_raw(pagename, form):
    """CGI action 'raw': emit the page source verbatim."""
    page = Page(pagename)
    page.send_raw()
def handle_atom(pagename, form):
    """CGI action 'atom': emit the page's Atom feed."""
    page = Page(pagename)
    page.send_atom()
230 def handle_edit(pagename, form):
231 pg = Page(form['q'].value)
233 if form['file'].value:
234 pg.save(form['file'].file.read(), form['changelog'].value)
236 pg.save(form['savetext'].value.replace('\r\n', '\n'), form['changelog'].value)
238 elif 'cancel' in form:
239 pg.msg_text = 'Editing canceled'
240 pg.msg_type = 'notice'
242 else: # preview or edit
244 if 'preview' in form:
245 text = form['savetext'].value
248 def handle_get(pagename, form):
249 if not ext_re.search(pagename): # FIXME: no extension guesses a wiki page
250 Page(pagename).send()
252 # FIMXE: this is all bullshit, MimeTypes bases its guess on the extension!
253 from mimetypes import MimeTypes
254 mimetype, encoding = MimeTypes().guess_type(pagename)
255 Page(pagename).send_raw(mimetype=mimetype, args=form)
257 # Used by sys/macros/WordIndex and sys/macros/TitleIndex
def make_index_key():
    """Build the centered a-z anchor navigation bar used by the index macros."""
    anchors = []
    for letter in 'abcdefghijklmnopqrstuvwxyz':
        anchors.append('<a href="#%s">%s</a>' % (letter, letter))
    return '<p style="text-align: center">' + ' | '.join(anchors) + '</p>'
def page_list(dirname=None, search_re=None):
    """List entries of *dirname* (default: cwd) whose names match *search_re*.

    With no pattern given, match CamelCase WikiWord names, optionally nested
    as Word/SubWord paths.  The result is returned sorted.
    """
    if search_re is None:
        # FIXME: WikiWord is too restrictive now!
        search_re = re.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$")
    entries = os.listdir(dirname or '.')
    return sorted(name for name in entries if search_re.match(name))
def _macro_ELAPSED_TIME(*args, **kvargs):
    """Macro: CPU seconds spent rendering this request.

    Measured from the module-level start_time stamp taken at startup.
    NOTE(review): time.clock() was removed in Python 3.8; time.perf_counter()
    is the replacement -- confirm the target interpreter before switching.
    """
    # FIX: the original "%03f" was a no-op width specifier (floats always
    # render wider than 3 chars); "%.3f" gives the intended 3-decimal output.
    return "%.3f" % (clock() - start_time)
271 def _macro_VERSION(*args, **kvargs):
275 """Object that turns Wiki markup into HTML."""
276 def __init__(self, raw, kvargs=None):
278 self.kvargs = kvargs or {}
280 self.in_pre = self.in_html = self.in_table = self.in_li = False
281 self.in_header = True
282 self.list_indents = [] # a list of pairs (indent_level, list_type) to track nested lists
290 "--": ["del", False],
291 "^^": ["sup", False],
292 ",,": ["sub", False],
293 "''": ["em", False], # LEGACY
294 "'''": ["b", False], # LEGACY
297 def _b_repl(self, word):
298 style = self.styles[word]
299 style[1] = not style[1]
300 return ['</', '<'][style[1]] + style[0] + '>'
302 def _glyph_repl(self, word):
305 def _tit_repl(self, word):
307 result = '</h%d><p>\n' % self.h_level
310 self.h_level = len(word) - 1
311 link = permalink(self.line)
312 result = '\n</p><h%d id="%s"><a class="heading" href="#%s">¶</a> ' % (self.h_level, link, link)
315 def _br_repl(self, word):
318 def _rule_repl(self, word):
319 return '\n<hr size="%d" noshade="noshade" />\n' % (len(word) - 2)
321 def _macro_repl(self, word):
323 args, kvargs = parse_args(word)
324 if args[0] in self.kvargs:
325 return self.kvargs[args[0]]
326 macro = globals().get('_macro_' + args[0])
328 exec(open("sys/macros/" + args[0] + ".py").read(), globals())
329 macro = globals().get('_macro_' + args[0])
330 return macro(*args, **kvargs)
332 msg = cgi.escape(word) + ": " + cgi.escape(str(e))
334 msg = '<strong class="error">' + msg + '</strong>'
def _hurl_repl(self, word):
    """Expand [[target|text|opts]] markup into an anchor via link_tag."""
    positional, named = parse_args(word)
    return link_tag(*positional, **named)
341 def _inl_repl(self, word):
342 args, kvargs = parse_args(word)
346 # This double div nonsense works around a limitation of the HTML block model
347 return '<div class="' + kvargs.get('class', 'thumb') + '">' \
348 + '<div class="innerthumb">' \
349 + link_inline_glob(name, descr, kvargs) \
350 + '<div class="caption">' + descr + '</div></div></div>'
352 return link_inline_glob(name, None, kvargs)
354 def _html_repl(self, word):
355 if not self.in_html and word.startswith('<div'): word = '</p>' + word
357 return word; # Pass through
359 def _htmle_repl(self, word):
361 if not self.in_html and word.startswith('</div'): word += '<p>'
362 return word; # Pass through
364 def _ent_repl(self, s):
366 return s; # Pass through
367 return {'&': '&',
371 def _img_repl(self, word): # LEGACY
372 return self._inl_repl('{{' + word + '}}')
374 def _word_repl(self, word): # LEGACY
375 if self.in_html: return word # pass through
376 return link_tag(word)
378 def _url_repl(self, word): # LEGACY
379 if self.in_html: return word # pass through
380 return link_tag(word)
382 def _email_repl(self, word): # LEGACY
383 if self.in_html: return word # pass through
384 return '<a href="mailto:%s">%s</a>' % (word, word)
386 def _li_repl(self, match):
393 def _pre_repl(self, word):
394 if word == '{{{' and not self.in_pre:
402 def _hi_repl(self, word):
403 return '<strong class="highlight ' + word + '">' + word + '</strong>'
405 def _tr_repl(self, word):
407 if not self.in_table:
410 out = '</p><table><tbody>\n'
412 out = out + '<tr class="' + ['even', 'odd'][self.tr_cnt % 2] + '">'
413 return out + ['<td>', '<th>'][word.strip() == '||=']
415 def _td_repl(self, word):
417 return ['</td><td>', '</th><th>'][word.strip() == '||=']
420 def _tre_repl(self, word):
422 return ['</td></tr>', '</th></tr>'][word.strip() == '||=']
425 def _indent_level(self):
426 return len(self.list_indents) and self.list_indents[-1][0]
428 def _indent_to(self, new_level, list_type=''):
429 if self._indent_level() == new_level:
432 while self._indent_level() > new_level:
435 self.in_li = False # FIXME
436 s += '</' + self.list_indents[-1][1] + '>\n'
437 del(self.list_indents[-1])
439 list_type = ('ul', 'ol')[list_type == '#']
440 while self._indent_level() < new_level:
441 self.list_indents.append((new_level, list_type))
442 s += '<' + list_type + '>\n'
446 def replace(self, match):
447 for rule, hit in list(match.groupdict().items()):
449 return getattr(self, '_' + rule + '_repl')(hit)
451 raise Exception("Can't handle match " + repr(match))
453 def print_html(self):
454 print('<div class="wiki"><p>')
456 scan_re = re.compile(r"""(?:
457 # Styles and formatting ("--" must cling to a word to disambiguate it from the dash)
458 (?P<b> \*\* | // | \#\# | __ | --\b | \b-- | \^\^ | ,, | ''' | '' )
462 | (?P<hi> \b( FIXME | TODO | DONE )\b )
466 | (?P<macro> \<\<[^\>]+\>\>)
467 | (?P<hurl> \[\[[^\]]+\]\])
470 | (?P<html> <(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])\b )
471 | (?P<htmle> ( /\s*> | </(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])> ) )
474 # Auto links (LEGACY)
475 | (?P<img> \b[a-zA-Z0-9_/-]+\.(""" + image_ext + "|" + video_ext + r"""))
476 | (?P<word> \b(?:[A-Z][a-z]+){2,}\b)
477 | (?P<url> (http|https|ftp|mailto)\:[^\s'\"]+\S)
478 | (?P<email> [-\w._+]+\@[\w.-]+)
480 # Lists, divs, spans and inline objects
481 | (?P<li> ^\s+[\*\#]\s+)
482 | (?P<pre> \{\{\{|\s*\}\}\})
483 | (?P<inl> \{\{[^\}]+\}\})
486 | (?P<tr> ^\s*\|\|(=|)\s*)
487 | (?P<tre> \s*\|\|(=|)\s*$)
488 | (?P<td> \s*\|\|(=|)\s*)
490 # TODO: highlight search words (look at referrer)
492 pre_re = re.compile("""(?:
496 blank_re = re.compile(r"^\s*$")
497 indent_re = re.compile(r"^(\s*)(\*|\#|)")
498 tr_re = re.compile(r"^\s*\|\|")
499 eol_re = re.compile(r"\r?\n")
500 # For each line, we scan through looking for magic strings, outputting verbatim any intervening text
501 #3.0: for self.line in eol_re.split(str(self.raw.expandtabs(), 'utf-8')):
502 for self.line in eol_re.split(str(self.raw.expandtabs())):
505 if self.line.startswith('#'):
507 self.in_header = False
510 print(re.sub(pre_re, self.replace, self.line))
512 if self.in_table and not tr_re.match(self.line):
513 self.in_table = False
514 print('</tbody></table><p>')
516 if blank_re.match(self.line):
519 indent = indent_re.match(self.line)
520 print(self._indent_to(len(indent.group(1)), indent.group(2)))
521 # Stand back! Here we apply the monster regex that does all the parsing
522 print(re.sub(scan_re, self.replace, self.line))
524 if self.in_pre: print('</pre>')
525 if self.in_table: print('</tbody></table><p>')
526 print(self._indent_to(0))
529 class HttpException(Exception):
530 def __init__(self, error, query):
535 def __init__(self, page_name="Limbo"):
536 self.page_name = page_name.rstrip('/');
538 self.msg_type = 'error'
539 if not file_re.match(self.page_name):
540 raise HttpException("403 Forbidden", self.page_name)
def split_title(self):
    """Render the CamelCase page name as words: 'FrontPage' -> 'Front Page'."""
    name = self.page_name
    # Insert a space wherever a lowercase letter is followed by an uppercase one.
    return re.sub('([a-z])([A-Z])', r'\1 \2', name)
547 return self.page_name
549 def _tmp_filename(self):
550 return self.page_name + '.tmp' + str(os.getpid()) + '#'
554 return os.stat(self._filename()).st_mtime
556 if err.errno == errno.ENOENT:
565 def get_raw_body(self, default=None):
567 return open(self._filename(), 'rb').read()
569 if err.errno == errno.ENOENT:
571 default = '//[[?a=edit&q=%s|Describe %s]]//' % (self.page_name, self.page_name)
573 if err.errno == errno.EISDIR:
574 return self.format_dir()
577 def format_dir(self):
580 for dirname in self.page_name.strip('/').split('/'):
581 pathname = (pathname and pathname + '/' ) + dirname
582 out += '[[' + pathname + '|' + dirname + ']]/'
586 for filename in page_list(self._filename(), file_re):
587 if image_re.match(filename):
588 maxwidth = config_get('image_maxwidth', '400')
590 maxwidth = ' | maxwidth=' + str(maxwidth)
591 images_out += '{{' + self.page_name + '/' + filename + ' | ' + humanlink(filename) + maxwidth + ' | class=thumbleft}}\n'
593 out += ' * [[' + self.page_name + '/' + filename + ']]\n'
594 return out + images_out
597 if not '_pragmas' in self.__dict__:
600 file = open(self._filename(), 'rt')
601 attr_re = re.compile(r"^#(\S*)(.*)$")
603 m = attr_re.match(line)
606 self._pragmas[m.group(1)] = m.group(2).strip()
607 #print "bernie: pragmas[" + m.group(1) + "] = " + m.group(2) + "<br>\n"
609 if err.errno != errno.ENOENT and err.errno != errno.EISDIR:
def pragma(self, name, default):
    """Look up a single #pragma value for this page, falling back to *default*."""
    pragmas = self.pragmas()
    if name in pragmas:
        return pragmas[name]
    return default
616 def can(self, action, default=True):
619 #acl SomeUser:read,write All:read
620 acl = self.pragma("acl", None)
621 for rule in acl.split():
622 (user, perms) = rule.split(':')
623 if user == remote_user() or user == "All":
624 return action in perms.split(',')
628 self.msg_text = 'Illegal acl line: ' + acl
632 return self.can("write", True)
635 return self.can("read", True)
637 def send_title(self, name=None, text="Limbo", msg_text=None, msg_type='error'):
639 if title_done: return
642 emit_header(self._mtime())
643 print('<!doctype html>\n<html lang="en">')
644 print("<head><title>%s: %s</title>" % (config_get('site_name', "Unconfigured Wiki"), text))
645 print(' <meta charset="UTF-8">')
647 print(' <meta name="robots" content="noindex,nofollow" />')
649 for http_equiv, content in config_get('meta_urls', {}):
650 print(' <meta http-equiv="%s" content="%s" />' % (http_equiv, relative_url(content)))
652 for link in config_get('link_urls', {}):
654 print(' <link rel="%s" href="%s" />' % (rel, relative_url(href)))
656 editable = name and self.can_write() and is_privileged()
658 print(' <link rel="alternate" type="application/x-wiki" title="Edit this page" href="%s" />' \
659 % relative_url('?a=edit&q=' + name, privileged=True))
661 history = config_get('history_url')
662 if history is not None:
663 print(' <link rel="alternate" type="application/rss+xml" title="RSS" href="%s" />' \
664 % relative_url(history + '?a=rss'))
670 print('<body ondblclick="location.href=\'' + relative_url('?a=edit&q=' + name, privileged=True) + '\'">')
675 send_guru(msg_text, msg_type)
677 if self.pragma("navbar", "on") != "on":
681 print('<nav><div class="nav">')
682 print link_tag('FrontPage', config_get('site_icon', 'Home'), cssclass='navlink')
684 print(' <b>' + link_tag('?a=titlesearch&q=' + name, text, cssclass='navlink') + '</b> ')
686 print(' <b>' + text + '</b> ')
687 print(' | ' + link_tag('FindPage', 'Find Page', cssclass='navlink'))
689 print(' | <a href="' + relative_url(history) + '" class="navlink">Recent Changes</a>')
691 print(' | <a href="' + relative_url(history + '?a=history;f=' + name) + '" class="navlink">Page History</a>')
694 print(' | ' + link_tag(name + '?a=raw', 'Raw Text', cssclass='navlink'))
695 if config_get('privileged_url') is not None:
697 print(' | ' + link_tag('?a=edit&q=' + name, 'Edit', cssclass='navlink', privileged=True))
699 print(' | ' + link_tag(name, 'Login', cssclass='navlink', privileged=True))
702 if user != 'AnonymousCoward':
703 print(' | <span class="login"><i><b>' + link_tag('User/' + user, user) + '</b></i></span>')
705 print('<hr /></div></nav>')
707 def send_footer(self):
708 if config_get('debug_cgi', False):
709 cgi.print_arguments()
712 footer = self.pragma("footer", "sys/footer")
714 link_inline(footer, kvargs = {
715 'LAST_MODIFIED': strftime(config_get('datetime_fmt', '%Y-%m-%dT%I:%M:%S%p'), localtime(self._mtime()))
717 print("</body></html>")
719 def send_naked(self, kvargs=None):
721 WikiFormatter(self.get_raw_body(), kvargs).print_html()
723 send_guru("Read access denied by ACLs", "notice")
727 value = self.pragma("css", None)
730 link_urls += [ [ "stylesheet", value ] ]
732 self.send_title(name=self.page_name, text=self.split_title(), msg_text=self.msg_text, msg_type=self.msg_type)
737 emit_header(self._mtime(), 'application/atom+xml')
739 link_inline("sys/atom_header", kvargs = {
740 'LAST_MODIFIED': strftime(config_get('datetime_fmt', '%a, %d %b %Y %I:%M:%S %p'), localtime(self._mtime()))
745 link_inline("sys/atom_footer")
748 def send_editor(self, preview=None):
749 self.send_title(text='Edit ' + self.split_title(), msg_text=self.msg_text, msg_type=self.msg_type)
750 if not self.can_write():
751 send_guru("Write access denied by ACLs", "error")
755 preview = self.get_raw_body(default='')
757 link_inline("sys/EditPage", kvargs = {
758 'EDIT_BODY': cgi.escape(preview),
759 #'EDIT_PREVIEW': WikiFormatter(preview).print_html(),
763 print("<div class='preview'>")
764 WikiFormatter(preview).print_html()
768 def send_raw(self, mimetype='text/plain', args=[]):
769 if not self.can_read():
770 self.send_title(msg_text='Read access denied by ACLs', msg_type='notice')
773 emit_header(self._mtime(), mimetype)
774 if 'maxwidth' in args:
777 subprocess.check_call(['gm', 'convert', self._filename(),
778 '-scale', args['maxwidth'].value + ' >', '-'])
780 body = self.get_raw_body()
783 def _write_file(self, data):
784 tmp_filename = self._tmp_filename()
785 open(tmp_filename, 'wb').write(data)
786 name = self._filename()
788 # Bad Bill! POSIX rename ought to replace. :-(
792 if err.errno != errno.ENOENT: raise err
793 path = os.path.split(name)[0]
794 if path and not os.path.exists(path):
796 os.rename(tmp_filename, name)
798 def save(self, newdata, changelog):
799 if not self.can_write():
800 self.msg_text = 'Write access denied by Access Control List'
802 if not is_privileged():
803 self.msg_text = 'Unauthenticated access denied'
806 self._write_file(newdata)
808 if config_get('post_edit_hook'):
811 config_get('post_edit_hook'),
812 self.page_name, remote_user(),
813 remote_host(), changelog ]
814 child = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
815 output = child.stdout.read()
818 self.msg_text += "Post-editing hook returned %d. Command was:\n'%s'\n" % (rc, "' '".join(cmd))
820 self.msg_text += 'Output follows:\n' + output
822 self.msg_text = 'Thank you for your contribution. Your attention to detail is appreciated.'
823 self.msg_type = 'success'
826 exec(open("geekigeeki.conf.py").read())
827 os.chdir(config_get('data_dir', 'data'))
828 form = cgi.FieldStorage()
829 action = form.getvalue('a', 'get')
830 handler = globals().get('handle_' + action)
832 handler(query_string(), form)
834 send_httperror("403 Forbidden", query_string())
836 except HttpException, e:
837 send_httperror(e.error, query=e.query)
839 send_httperror("500 Internal Server Error", query=query_string(), trace=True)