ext_re = re.compile(r"\.([^\./]+)$")
# CGI stuff ---------------------------------------------------------
+def config_get(key, default=None):
+    """Return the configuration value named *key*, or *default* if unset.
+
+    Configuration settings are plain module-level globals (presumably set
+    by an exec'd config file -- confirm), so a missing setting must be
+    looked up safely instead of raising NameError.
+    """
+    return globals().get(key, default)
+
def script_name():
return os.environ.get('SCRIPT_NAME', '')
return os.environ.get('QUERY_STRING', '') or 'FrontPage'
def privileged_path():
- return privileged_url or script_name()
+ return config_get('privileged_url') or script_name()
def remote_user():
user = os.environ.get('REMOTE_USER', '')
def humanlink(s):
return re.sub(r'(?:.*[/:]|)([^:/\.]+)(?:\.[^/:]+|)$', r'\1', s.replace('_', ' '))
-# Split arg lists like "blah| blah blah| width=100 | align = center",
+# Split arg lists like "blah|blah blah| width=100 | align = center",
# return a list containing anonymous arguments and a map containing the named arguments
def parse_args(s):
args = []
- kwargs = {}
+ kvargs = {}
for arg in s.strip('<[{}]>').split('|'):
m = re.match('\s*(\w+)\s*=\s*(.+)\s*', arg)
if m is not None:
- kwargs[m.group(1)] = m.group(2)
+ kvargs[m.group(1)] = m.group(2)
else:
args.append(arg.strip())
- return (args, kwargs)
+ return (args, kvargs)
def url_args(kvargs):
argv = []
print(' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
print('<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">')
- print("<head><title>%s: %s</title>" % (site_name, text))
+ print("<head><title>%s: %s</title>" % (config_get('site_name', "Unconfigured Wiki"), text))
print(' <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8" />')
if not name:
print(' <meta name="robots" content="noindex,nofollow" />')
- for meta in meta_urls:
- http_equiv, content = meta
+ for http_equiv, content in config_get('meta_urls', {}):
print(' <meta http-equiv="%s" content="%s" />' % (http_equiv, relative_url(content)))
- for link in link_urls:
+ for link in config_get('link_urls', {}):
rel, href = link
print(' <link rel="%s" href="%s" />' % (rel, relative_url(href)))
- if name and writable and privileged_url is not None:
+ editable = name and writable and config_get('privileged_url') is not None
+ if editable:
print(' <link rel="alternate" type="application/x-wiki" title="Edit this page" href="%s" />' \
% (privileged_path() + '?a=edit&q=' + name))
- if history_url is not None:
+ history = config_get('history_url')
+ if history is not None:
print(' <link rel="alternate" type="application/rss+xml" title="RSS" href="%s" />' \
- % relative_url(history_url + '?a=rss'))
+ % relative_url(history + '?a=rss'))
print('</head>')
# Body
- if name and writable and privileged_url is not None:
+ if editable:
print('<body ondblclick="location.href=\'' + privileged_path() + '?a=edit&q=' + name + '\'">')
else:
print('<body>')
# Navbar
print('<div class="nav">')
-    print link_tag('FrontPage', site_icon or 'Home', cssclass='navlink')
+    # Parenthesized call form: consistent with every other print in this
+    # patch and forward-compatible (identical behavior for one argument).
+    print(link_tag('FrontPage', config_get('site_icon', 'Home'), cssclass='navlink'))
if name:
print(' <b>' + link_tag('?fullsearch=' + name, text, cssclass='navlink') + '</b> ')
else:
print(' <b>' + text + '</b> ')
print(' | ' + link_tag('FindPage', 'Find Page', cssclass='navlink'))
- if 'history_url' in globals():
- print(' | <a href="' + relative_url(history_url) + '" class="navlink">Recent Changes</a>')
+ if history:
+ print(' | <a href="' + relative_url(history) + '" class="navlink">Recent Changes</a>')
if name:
- print(' | <a href="' + relative_url(history_url + '?a=history;f=' + name) + '" class="navlink">Page History</a>')
+ print(' | <a href="' + relative_url(history + '?a=history;f=' + name) + '" class="navlink">Page History</a>')
if name:
print(' | ' + link_tag(name + '?a=raw', 'Raw Text', cssclass='navlink'))
- if privileged_url is not None:
+ if config_get('privileged_url') is not None:
if writable:
print(' | ' + link_tag('?a=edit&q=' + name, 'Edit', cssclass='navlink', privileged=True))
else:
elif file_re.match(dest) and Page(dest).exists():
link_class = 'wikilink'
else:
- text = nonexist_pfx + text
+ text = config_get('nonexist_pfx', '') + text
link_class = 'nonexistent'
# Prevent crawlers from following links potentially added by spammers or to generated pages
elif image_re.match(name):
return '<a href="%s"><img border="0" src="%s" alt="%s" /></a>' % (url, url + url_args(kvargs), descr)
elif file_re.match(name) and not ext_re.search(name): # FIXME: this guesses a wiki page
- return Page(name).send_naked()
+ return Page(name).send_naked(kvargs)
else:
return '<iframe width="100%%" scrolling="auto" frameborder="0" src="%s"><a href="%s">%s</a></iframe>' \
% (url, url, name)
links = ['<a href="#%s">%s</a>' % (ch, ch) for ch in 'abcdefghijklmnopqrstuvwxyz']
return '<p style="text-align: center">' + ' | '.join(links) + '</p>'
-def page_list(dirname=None, re=None):
- if re is None:
+def page_list(dirname=None, search_re=None):
+ if search_re is None:
# FIXME: WikiWord is too restrictive now!
- re = re.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$")
- return sorted(filter(re.match, os.listdir(dirname or data_dir)))
+ search_re = re.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$")
+ return sorted(filter(search_re.match, os.listdir(dirname or config_get('data_dir', 'data'))))
def send_footer(mod_string=None):
- if globals().get('debug_cgi', False):
+ if config_get('debug_cgi', False):
cgi.print_arguments()
cgi.print_form(form)
cgi.print_environ()
- #FIXME link_inline("sys/footer")
- print('''
-<div id="footer"><hr />
-<p class="copyright">
-<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/"><img class="license" alt="Creative Commons License" src="%s" /></a>
-<span class="benchmark">generated in %0.3fs</span> by <a href="http://www.codewiz.org/wiki/GeekiGeeki">GeekiGeeki</a> version %s
-</p>
-''' % (relative_url('cc-by-sa.png'), clock() - start_time, __version__))
- if mod_string:
- print('<p class="modified">last modified %s</p>' % mod_string)
- print('</div></body></html>')
+ link_inline("sys/footer", kvargs= { 'LAST_MODIFIED': mod_string })
+ print("</body></html>")
+
+def _macro_ELAPSED_TIME(*args, **kvargs):
+    # Page-generation time in seconds. "%.3f" (three decimals) matches the
+    # removed footer's "%0.3fs"; the previous "%03f" was a typo meaning
+    # min-width 3 with default 6-digit precision.
+    return "%.3f" % (clock() - start_time)
+
+def _macro_VERSION(*args, **kvargs):
+ return __version__
class WikiFormatter:
"""Object that turns Wiki markup into HTML.
All formatting commands can be parsed one line at a time, though
some state is carried over between lines.
"""
- def __init__(self, raw):
+ def __init__(self, raw, kvargs=None):
self.raw = raw
+ self.kvargs = kvargs or {}
self.h_level = 0
self.in_pre = self.in_html = self.in_table = self.in_li = False
self.in_header = True
"**": ["b", False],
"##": ["tt", False],
"__": ["u", False],
+ "--": ["del", False],
"^^": ["sup", False],
",,": ["sub", False],
"''": ["em", False], # LEGACY
style[1] = not style[1]
return ['</', '<'][style[1]] + style[0] + '>'
+ def _glyph_repl(self, word):
+ return '—'
+
def _tit_repl(self, word):
if self.h_level:
result = '</h%d><p>\n' % self.h_level
def _macro_repl(self, word):
try:
- args, kwargs = parse_args(word)
+ args, kvargs = parse_args(word)
+ if args[0] in self.kvargs:
+ return self.kvargs[args[0]]
macro = globals().get('_macro_' + args[0])
if not macro:
- exec(open("macros/" + name + ".py").read(), globals())
- macro = globals().get('_macro_' + name)
- return macro(*args, **kwargs)
- except Exception:
- msg = cgi.escape(word)
+ exec(open("macros/" + args[0] + ".py").read(), globals())
+ macro = globals().get('_macro_' + args[0])
+ return macro(*args, **kvargs)
+        except Exception as e:
+            # "as" syntax works on Python 2.6+ and 3.x; str(e) replaces the
+            # deprecated BaseException.message attribute (gone in Python 3).
+            msg = cgi.escape(word) + ": " + cgi.escape(str(e))
if not self.in_html:
msg = '<strong class="error">' + msg + '</strong>'
return msg
if hit:
return getattr(self, '_' + rule + '_repl')(hit)
else:
- raise "Can't handle match " + repr(match)
+ raise Exception("Can't handle match " + repr(match))
def print_html(self):
print('<div class="wiki"><p>')
scan_re = re.compile(r"""(?:
- # Styles and formatting
- (?P<b> \*\*|'''|//|''|\#\#|``|__|\^\^|,,)
+ # Styles and formatting ("--" must cling to a word to disambiguate it from the dash)
+ (?P<b> \*\* | // | \#\# | __ | --\b | \b-- | \^\^ | ,, | ''' | '' | `` )
| (?P<tit> \={2,6})
| (?P<br> \\\\)
| (?P<rule> ^-{3,})
| (?P<hi> \b( FIXME | TODO | DONE )\b )
+ | (?P<glyph> --)
# Links
| (?P<macro> \<\<([^\s\|\>]+)(?:\s*\|\s*([^\>]+)|)\>\>)
| (?P<hurl> \[\[([^\s\|]+)(?:\s*\|\s*([^\]]+)|)\]\])
# Inline HTML
- | (?P<html> <(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])\b )
+ | (?P<html> <(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])\b )
| (?P<htmle> ( /\s*> | </(br|hr|div|span|form|iframe|input|textarea|a|img|h[1-5])> ) )
| (?P<ent> [<>&] )
return re.sub('([a-z])([A-Z])', r'\1 \2', self.page_name)
def _filename(self):
- return os.path.join(data_dir, self.page_name)
+ return os.path.join(config_get('data_dir', 'data'), self.page_name)
def _tmp_filename(self):
- return os.path.join(data_dir, ('#' + self.page_name.replace('/','_') + '.' + str(os.getpid()) + '#'))
+ return os.path.join(config_get('data_dir', 'data'), ('#' + self.page_name.replace('/','_') + '.' + str(os.getpid()) + '#'))
def exists(self):
try:
for filename in page_list(self._filename(), file_re):
if image_re.match(filename):
- if image_maxwidth:
- maxwidth_arg = ' | maxwidth=' + str(image_maxwidth)
- out += '{{' + self.page_name + '/' + filename + ' | ' + humanlink(filename) + maxwidth_arg + ' | class=thumbleft}}\n'
+            # Key must be a string: the bare name raised NameError whenever
+            # image_maxwidth was not configured, defeating config_get's purpose.
+            maxwidth = config_get('image_maxwidth', '')
+ if maxwidth:
+ maxwidth = ' | maxwidth=' + str(maxwidth)
+ out += '{{' + self.page_name + '/' + filename + ' | ' + humanlink(filename) + maxwidth + ' | class=thumbleft}}\n'
else:
out += ' * [[' + self.page_name + '/' + filename + ']]\n'
return out
def can_read(self):
return self.can("read", True)
- def send_naked(self):
+ def send_naked(self, kvargs=None):
if self.can_read():
- WikiFormatter(self.get_raw_body()).print_html()
+ WikiFormatter(self.get_raw_body(), kvargs).print_html()
else:
send_guru("Read access denied by ACLs", "notice")
if err.errno != errno.ENOENT:
raise err
return None
- return strftime(datetime_fmt, modtime)
+    # Key must be a quoted string (as at every other config_get call site);
+    # the bare name raised NameError when datetime_fmt was not configured.
+    return strftime(config_get('datetime_fmt', '%a %d %b %Y %I:%M %p'), modtime)
def send_editor(self, preview=None):
send_title(None, 'Edit ' + self.split_title(), msg_text=self.msg_text, msg_type=self.msg_type)
self._write_file(newdata)
rc = 0
- if post_edit_hook:
+ if config_get('post_edit_hook'):
import subprocess
- cmd = [ post_edit_hook, data_dir + '/' + self.page_name, remote_user(), remote_host(), changelog]
+ cmd = [
+ config_get('post_edit_hook'),
+ os.path.join(config_get('data_dir', 'data'), self.page_name), remote_user(),
+ remote_host(), changelog ]
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
output = child.stdout.read()
rc = child.wait()