')
- print link_tag('FrontPage', site_icon or 'Home', 'navlink')
+ print link_tag('FrontPage', site_icon or 'Home', cssclass='navlink')
if name:
- print(' ' + link_tag('?fullsearch=' + name, text, 'navlink') + ' ')
+ print(' ' + link_tag('?fullsearch=' + name, text, cssclass='navlink') + ' ')
else:
print(' ' + text + ' ')
- print(' | ' + link_tag('FindPage', 'Find Page', 'navlink'))
+ print(' | ' + link_tag('FindPage', 'Find Page', cssclass='navlink'))
if 'history_url' in globals():
print(' | Recent Changes')
if name:
print(' | Page History')
if name:
- print(' | ' + link_tag('?raw=' + name, 'Raw Text', 'navlink'))
+ print(' | ' + link_tag(name + '?a=raw', 'Raw Text', cssclass='navlink'))
if privileged_url is not None:
if writable:
- print(' | ' + link_tag('?edit=' + name, 'Edit', 'navlink', privileged=True))
+ print(' | ' + link_tag('?a=edit&q=' + name, 'Edit', cssclass='navlink', privileged=True))
else:
- print(' | ' + link_tag(name, 'Login', 'navlink', privileged=True))
+ print(' | ' + link_tag(name, 'Login', cssclass='navlink', privileged=True))
else:
print(' | Immutable Page')
@@ -175,35 +195,36 @@ def send_httperror(status="403 Not Found", query=""):
send_title(None, msg_text=("%s: on query '%s'" % (status, query)))
send_footer()
-def link_tag(params, text=None, link_class=None, privileged=False):
+def link_tag(dest, text=None, privileged=False, **kvargs):
if text is None:
- text = params # default
+ text = humanlink(dest)
elif img_re.match(text):
text = ''
+ link_class = kvargs.get('class', kvargs.get('cssclass', None))
if not link_class:
- if is_external_url(params):
+ if is_external_url(dest):
link_class = 'external'
- elif file_re.match(params) and Page(params).exists():
+ elif file_re.match(dest) and Page(dest).exists():
link_class = 'wikilink'
else:
- params = nonexist_pfx + params
+ text = nonexist_pfx + text
link_class = 'nonexistent'
- classattr = 'class="%s" ' % link_class
# Prevent crawlers from following links potentially added by spammers or to generated pages
+ nofollow = ''
if link_class == 'external' or link_class == 'navlink':
- classattr += 'rel="nofollow"'
+ nofollow = 'rel="nofollow" '
-    return '<a %shref="%s">%s</a>' % (classattr, relative_url(params, privileged=privileged), text)
+    return '<a class="%s" %shref="%s">%s</a>' % (link_class, nofollow, relative_url(dest, privileged=privileged), text)
-def link_inline(name, descr=None, args=''):
- if not descr: descr = name
+def link_inline(name, descr=None, kvargs={}):
+ if not descr: descr = humanlink(name)
url = relative_url(name)
if video_re.match(name):
- return '' % url
+ return '' % url
elif img_re.match(name):
-        return '<a href="%s"><img src="%s" alt="%s" /></a>' % (url, url + args, descr)
+        return '<a href="%s"><img src="%s" alt="%s" /></a>' % (url, url + url_args(kvargs), descr)
elif file_re.match(name) and not ext_re.search(name): # FIXME: this guesses a wiki page
return Page(name).send_naked()
else:
@@ -212,8 +233,12 @@ def link_inline(name, descr=None, args=''):
# Search ---------------------------------------------------
-def handle_fullsearch(needle):
- send_title(None, 'Full text search for "%s"' % (needle))
+def print_search_stats(hits, searched):
+    print("<p>%d hits out of %d pages searched.</p>" % (hits, searched))
+
+def handle_fullsearch(query, form):
+ needle = form['q'].value
+ send_title(None, 'Full text search for "' + needle + '"')
needle_re = re.compile(needle, re.IGNORECASE)
hits = []
@@ -239,9 +264,9 @@ def handle_fullsearch(needle):
print_search_stats(len(hits), len(all_pages))
-def handle_titlesearch(needle):
- # TODO: check needle is legal -- but probably we can just accept any RE
- send_title(None, "Title search for \"" + needle + '"')
+def handle_titlesearch(query, form):
+ needle = form['q'].value
+ send_title(None, 'Title search for "' + needle + '"')
needle_re = re.compile(needle, re.IGNORECASE)
all_pages = page_list()
@@ -254,22 +279,19 @@ def handle_titlesearch(needle):
print_search_stats(len(hits), len(all_pages))
-def print_search_stats(hits, searched):
-    print("<p>%d hits out of %d pages searched.</p>" % (hits, searched))
-
-def handle_raw(pagename):
+def handle_raw(pagename, form):
if not file_re.match(pagename):
send_httperror("403 Forbidden", pagename)
return
Page(pagename).send_raw()
-def handle_edit(pagename):
+def handle_edit(pagename, form):
if not file_re.match(pagename):
send_httperror("403 Forbidden", pagename)
return
- pg = Page(pagename)
+ pg = Page(form['q'].value)
if 'save' in form:
if form['file'].value:
pg.save(form['file'].file.read(), form['changelog'].value)
@@ -286,12 +308,27 @@ def handle_edit(pagename):
text = form['savetext'].value
pg.send_editor(text)
+def handle_get(pagename, form):
+ if file_re.match(pagename):
+ # FIMXE: this is all bullshit, MimeTypes bases its guess on the extension!
+ from mimetypes import MimeTypes
+ mimetype, encoding = MimeTypes().guess_type(pagename)
+ if mimetype:
+ Page(pagename).send_raw(mimetype=mimetype, args=form)
+ else:
+ Page(pagename).format()
+ else:
+ send_httperror("403 Forbidden", pagename)
+
# Used by macros/WordIndex and macros/TitleIndex
def make_index_key():
    links = ['<a href="#%s">%s</a>' % (ch, ch) for ch in 'abcdefghijklmnopqrstuvwxyz']
    return '<p>' + ' | '.join(links) + '</p>'
-def page_list(dirname = None, re = word_re):
+def page_list(dirname=None, re=None):
+    if re is None:
+        # FIXME: WikiWord is too restrictive now!
+        # NOTE(review): the parameter named `re` shadows the `re` module, so the
+        # module must be re-imported under another name before compiling.
+        import re as re_module
+        re = re_module.compile(r"^\b((([A-Z][a-z0-9]+){2,}/)*([A-Z][a-z0-9]+){2,})\b$")
     return sorted(filter(re.match, os.listdir(dirname or data_dir)))
def send_footer(mod_string=None):
@@ -358,48 +395,35 @@ class WikiFormatter:
return self._undent() + '\n\n' % (len(word) - 2)
def _macro_repl(self, word):
- m = re.compile("\<\<([^\s\|\>]+)(?:\s*\|\s*([^\>]+)|)\>\>").match(word)
- name = m.group(1)
- argv = [name]
- if m.group(2):
- argv.extend(m.group(2).split('|'))
- argv = list(map(str.strip, argv))
-
- macro = globals().get('_macro_' + name)
- if not macro:
- try:
+ try:
+ args, kwargs = parse_args(word)
+ macro = globals().get('_macro_' + args[0])
+ if not macro:
            exec(open("macros/" + args[0] + ".py").read(), globals())
- except IOError as err:
- if err.errno == errno.ENOENT: pass
- macro = globals().get('_macro_' + name)
- if macro:
- return macro(argv)
- else:
- msg = '<<' + '|'.join(argv) + '>>'
+        macro = globals().get('_macro_' + args[0])
+ return macro(*args, **kwargs)
+ except Exception:
+ msg = cgi.escape(word)
if not self.in_html:
msg = '' + msg + ''
return msg
def _hurl_repl(self, word):
- m = link_re.match(word)
- return link_tag(m.group(1), m.group(2))
+ args, kvargs = parse_args(word)
+ return link_tag(*args, **kvargs)
def _inl_repl(self, word):
- (name, descr) = link_re.match(word).groups()
-
- if descr:
- argv = descr.split('|')
- descr = argv.pop(0)
- args = ''
- if argv:
- args = '?' + '&'.join(argv)
-
- # The "extthumb" nonsense works around a limitation of the HTML block model
- return '
' \
- + link_inline(name, descr, args) \
+ args, kvargs = parse_args(word)
+ name = args.pop(0)
+ if len(args):
+ descr = args.pop(0)
+ # This double div nonsense works around a limitation of the HTML block model
+ return '
' \
+ + '
' \
+ + link_inline(name, descr, kvargs) \
+ '
' + descr + '
'
else:
- return link_inline(name, name)
+ return link_inline(name, None, kvargs)
def _html_repl(self, word):
if not self.in_html and word.startswith('
\n"
- except IOError as err:
+ except IOError, err:
if err.errno != errno.ENOENT and err.errno != errno.EISDIR:
            raise err
return self._pragmas
@@ -697,7 +721,7 @@ class Page:
try:
from time import localtime, strftime
modtime = localtime(os.stat(self._filename())[stat.ST_MTIME])
- except OSError as err:
+ except OSError, err:
if err.errno != errno.ENOENT:
raise err
return None
@@ -718,7 +742,7 @@ class Page:
+ ' from ' + cgi.escape(get_hostname(remote_host()))
+ ''))
print('