1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de> 7 2004 by Florian Festi 8 2006 by Mikko Virkkil 9 2005-2008 MoinMoin:ThomasWaldmann 10 2007 MoinMoin:ReimarBauer 11 2008 MoinMoin:FlorianKrupicka (redirectedOutput code) 12 @license: GNU GPL (v2 or later), see COPYING.txt for details. 13 """ 14 15 from DateSupport import * 16 from ItemSupport import ItemDirectoryStore 17 from MoinMoin.parser import text_moin_wiki 18 from MoinMoin.Page import Page 19 from MoinMoin.util import lock 20 from MoinMoin import config, search, wikiutil 21 from shlex import shlex 22 import re 23 import time 24 import os 25 import codecs 26 27 try: 28 from cStringIO import StringIO 29 except ImportError: 30 from StringIO import StringIO 31 32 # Moin 1.9 request parameters. 33 34 try: 35 from MoinMoin.support.werkzeug.datastructures import MultiDict 36 except ImportError: 37 pass 38 39 __version__ = "0.3" 40 41 # Extraction of shared fragments. 42 43 marker_regexp_str = r"([{]{3,}|[}]{3,})" 44 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 45 46 # Extraction of headings. 47 48 heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE) 49 50 # Category extraction from pages. 51 52 category_regexp = None 53 54 # Simple content parsing. 55 56 verbatim_regexp = re.compile(ur'(?:' 57 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 58 ur'|' 59 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 60 ur'|' 61 ur'!(?P<verbatim3>.*?)(\s|$)?' 62 ur'|' 63 ur'`(?P<monospace>.*?)`' 64 ur'|' 65 ur'{{{(?P<preformatted>.*?)}}}' 66 ur')', re.UNICODE) 67 68 # Category discovery. 
69 70 def getCategoryPattern(request): 71 global category_regexp 72 73 try: 74 return request.cfg.cache.page_category_regexact 75 except AttributeError: 76 77 # Use regular expression from MoinMoin 1.7.1 otherwise. 78 79 if category_regexp is None: 80 category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE) 81 return category_regexp 82 83 def getCategories(request): 84 85 """ 86 From the AdvancedSearch macro, return a list of category page names using 87 the given 'request'. 88 """ 89 90 # This will return all pages with "Category" in the title. 91 92 cat_filter = getCategoryPattern(request).search 93 return request.rootpage.getPageList(filter=cat_filter) 94 95 def getCategoryMapping(category_pagenames, request): 96 97 """ 98 For the given 'category_pagenames' return a list of tuples of the form 99 (category name, category page name) using the given 'request'. 100 """ 101 102 cat_pattern = getCategoryPattern(request) 103 mapping = [] 104 for pagename in category_pagenames: 105 name = cat_pattern.match(pagename).group("key") 106 if name != "Category": 107 mapping.append((name, pagename)) 108 mapping.sort() 109 return mapping 110 111 def getCategoryPages(pagename, request): 112 113 """ 114 Return the pages associated with the given category 'pagename' using the 115 'request'. 116 """ 117 118 query = search.QueryParser().parse_query('category:%s' % pagename) 119 results = search.searchPages(request, query, "page_name") 120 return filterCategoryPages(results, request) 121 122 def filterCategoryPages(results, request): 123 124 "Filter category pages from the given 'results' using the 'request'." 
125 126 cat_pattern = getCategoryPattern(request) 127 pages = [] 128 for page in results.hits: 129 if not cat_pattern.match(page.page_name): 130 pages.append(page) 131 return pages 132 133 def getAllCategoryPages(category_names, request): 134 135 """ 136 Return all pages belonging to the categories having the given 137 'category_names', using the given 'request'. 138 """ 139 140 pages = [] 141 pagenames = set() 142 143 for category_name in category_names: 144 145 # Get the pages and page names in the category. 146 147 pages_in_category = getCategoryPages(category_name, request) 148 149 # Visit each page in the category. 150 151 for page_in_category in pages_in_category: 152 pagename = page_in_category.page_name 153 154 # Only process each page once. 155 156 if pagename in pagenames: 157 continue 158 else: 159 pagenames.add(pagename) 160 161 pages.append(page_in_category) 162 163 return pages 164 165 def getPagesForSearch(search_pattern, request): 166 167 """ 168 Return result pages for a search employing the given 'search_pattern' and 169 using the given 'request'. 170 """ 171 172 query = search.QueryParser().parse_query(search_pattern) 173 results = search.searchPages(request, query, "page_name") 174 return filterCategoryPages(results, request) 175 176 # WikiDict functions. 177 178 def getWikiDict(pagename, request, superuser=False): 179 180 """ 181 Return the WikiDict provided by the given 'pagename' using the given 182 'request'. If the optional 'superuser' is specified as a true value, no read 183 access check will be made. 184 """ 185 186 if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)): 187 if hasattr(request.dicts, "dict"): 188 return request.dicts.dict(pagename) 189 else: 190 return request.dicts[pagename] 191 else: 192 return None 193 194 # Searching-related functions. 195 196 def getPagesFromResults(result_pages, request): 197 198 "Return genuine pages for the given 'result_pages' using the 'request'." 
199 200 return [Page(request, page.page_name) for page in result_pages] 201 202 # Region/section parsing. 203 204 def getRegions(s, include_non_regions=False): 205 206 """ 207 Parse the string 's', returning a list of explicitly declared regions. 208 209 If 'include_non_regions' is specified as a true value, fragments will be 210 included for text between explicitly declared regions. 211 """ 212 213 regions = [] 214 marker = None 215 is_block = True 216 217 # Start a region for exposed text, if appropriate. 218 219 if include_non_regions: 220 regions.append("") 221 222 for match_text in marker_regexp.split(s): 223 224 # Capture section text. 225 226 if is_block: 227 if marker or include_non_regions: 228 regions[-1] += match_text 229 230 # Handle section markers. 231 232 else: 233 234 # Close any open sections, returning to exposed text regions. 235 236 if marker: 237 238 # Add any marker to the current region, regardless of whether it 239 # successfully closes a section. 240 241 regions[-1] += match_text 242 243 if match_text.startswith("}") and len(marker) == len(match_text): 244 marker = None 245 246 # Start a region for exposed text, if appropriate. 247 248 if include_non_regions: 249 regions.append("") 250 251 # Without a current marker, start a new section. 252 253 else: 254 marker = match_text 255 regions.append("") 256 257 # Add the marker to the new region. 258 259 regions[-1] += match_text 260 261 # The match text alternates between text between markers and the markers 262 # themselves. 263 264 is_block = not is_block 265 266 return regions 267 268 def getFragmentsFromRegions(regions): 269 270 """ 271 Return fragments from the given 'regions', each having the form 272 (format, attributes, body text). 
273 """ 274 275 fragments = [] 276 277 for region in regions: 278 format, attributes, body, header, close = getFragmentFromRegion(region) 279 fragments.append((format, attributes, body)) 280 281 return fragments 282 283 def getFragmentFromRegion(region): 284 285 """ 286 Return a fragment for the given 'region' having the form (format, 287 attributes, body text, header, close), where the 'header' is the original 288 declaration of the 'region' or None if no explicit region is defined, and 289 'close' is the closing marker of the 'region' or None if no explicit region 290 is defined. 291 """ 292 293 if region.startswith("{{{"): 294 295 body = region.lstrip("{") 296 level = len(region) - len(body) 297 body = body.rstrip("}").lstrip() 298 299 # Remove any prelude and process metadata. 300 301 if body.startswith("#!"): 302 303 try: 304 declaration, body = body.split("\n", 1) 305 except ValueError: 306 declaration = body 307 body = "" 308 309 arguments = declaration[2:] 310 311 # Get any parser/format declaration. 312 313 if arguments and not arguments[0].isspace(): 314 details = arguments.split(None, 1) 315 if len(details) == 2: 316 format, arguments = details 317 else: 318 format = details[0] 319 arguments = "" 320 else: 321 format = None 322 323 # Get the attributes/arguments for the region. 324 325 attributes = parseAttributes(arguments, False) 326 327 # Add an entry for the format in the attribute dictionary. 328 329 if format and not attributes.has_key(format): 330 attributes[format] = True 331 332 return format, attributes, body, level * "{" + declaration + "\n", level * "}" 333 334 else: 335 return None, {}, body, level * "{" + "\n", level * "}" 336 337 else: 338 return None, {}, region, None, None 339 340 def getFragments(s, include_non_regions=False): 341 342 """ 343 Return fragments for the given string 's', each having the form 344 (format, arguments, body text). 
345 346 If 'include_non_regions' is specified as a true value, fragments will be 347 included for text between explicitly declared regions. 348 """ 349 350 return getFragmentsFromRegions(getRegions(s, include_non_regions)) 351 352 # Heading extraction. 353 354 def getHeadings(s): 355 356 """ 357 Return tuples of the form (level, title, span) for headings found within the 358 given string 's'. The span is itself a (start, end) tuple indicating the 359 matching region of 's' for a heading declaration. 360 """ 361 362 headings = [] 363 364 for match in heading_regexp.finditer(s): 365 headings.append( 366 (len(match.group("level")), match.group("heading"), match.span()) 367 ) 368 369 return headings 370 371 # Region/section attribute parsing. 372 373 def parseAttributes(s, escape=True): 374 375 """ 376 Parse the section attributes string 's', returning a mapping of names to 377 values. If 'escape' is set to a true value, the attributes will be suitable 378 for use with the formatter API. If 'escape' is set to a false value, the 379 attributes will have any quoting removed. 380 381 Because Unicode was probably not around when shlex, used here to tokenise 382 the attributes, was introduced, and since StringIO is not Unicode-capable, 383 any non-ASCII characters should be quoted in attributes. 384 """ 385 386 attrs = {} 387 f = StringIO(s.encode("utf-8")) 388 name = None 389 need_value = False 390 lex = shlex(f) 391 lex.wordchars += "-" 392 393 for token in lex: 394 token = unicode(token, "utf-8") 395 396 # Capture the name if needed. 397 398 if name is None: 399 name = escape and wikiutil.escape(token) or strip_token(token) 400 401 # Detect either an equals sign or another name. 402 403 elif not need_value: 404 if token == "=": 405 need_value = True 406 else: 407 attrs[name.lower()] = escape and "true" or True 408 name = wikiutil.escape(token) 409 410 # Otherwise, capture a value. 411 412 else: 413 # Quoting of attributes done similarly to wikiutil.parseAttributes. 
414 415 if token: 416 if escape: 417 if token[0] in ("'", '"'): 418 token = wikiutil.escape(token) 419 else: 420 token = '"%s"' % wikiutil.escape(token, 1) 421 else: 422 token = strip_token(token) 423 424 attrs[name.lower()] = token 425 name = None 426 need_value = False 427 428 # Handle any name-only attributes at the end of the collection. 429 430 if name and not need_value: 431 attrs[name.lower()] = escape and "true" or True 432 433 return attrs 434 435 def strip_token(token): 436 437 "Return the given 'token' stripped of quoting." 438 439 if token[0] in ("'", '"') and token[-1] == token[0]: 440 return token[1:-1] 441 else: 442 return token 443 444 # Macro argument parsing. 445 446 def parseMacroArguments(args): 447 448 """ 449 Interpret the arguments. To support commas in labels, the label argument 450 should be quoted. For example: 451 452 "label=No, thanks!" 453 """ 454 455 try: 456 parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or [] 457 except AttributeError: 458 parsed_args = args.split(",") 459 460 pairs = [] 461 for arg in parsed_args: 462 if arg: 463 pair = arg.split("=", 1) 464 if len(pair) < 2: 465 pairs.append((None, arg)) 466 else: 467 pairs.append(tuple(pair)) 468 469 return pairs 470 471 def parseDictEntry(entry, unqualified=None): 472 473 """ 474 Return the parameters specified by the given dict 'entry' string. The 475 optional 'unqualified' parameter can be used to indicate parameters that 476 need not be specified together with a keyword and can therefore be populated 477 in the given order as such unqualified parameters are encountered. 478 479 NOTE: This is similar to parseMacroArguments but employs space as a 480 NOTE: separator and attempts to assign unqualified parameters. 
481 """ 482 483 parameters = {} 484 unqualified = unqualified or () 485 486 try: 487 parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or [] 488 except AttributeError: 489 parsed_args = entry.split() 490 491 for arg in parsed_args: 492 try: 493 argname, argvalue = arg.split("=", 1) 494 495 # Detect unlikely parameter names. 496 497 if not argname.isalpha(): 498 raise ValueError 499 500 parameters[argname] = argvalue 501 502 # Unqualified parameters are assumed to be one of a recognised set. 503 504 except ValueError: 505 for argname in unqualified: 506 if not parameters.has_key(argname): 507 parameters[argname] = arg 508 break 509 510 return parameters 511 512 # Request-related classes and associated functions. 513 514 class Form: 515 516 """ 517 A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x 518 environment. 519 """ 520 521 def __init__(self, request): 522 self.request = request 523 self.form = request.values 524 525 def has_key(self, name): 526 return not not self.form.getlist(name) 527 528 def get(self, name, default=None): 529 values = self.form.getlist(name) 530 if not values: 531 return default 532 else: 533 return values 534 535 def __getitem__(self, name): 536 return self.form.getlist(name) 537 538 def __setitem__(self, name, value): 539 try: 540 self.form.setlist(name, value) 541 except TypeError: 542 self._write_enable() 543 self.form.setlist(name, value) 544 545 def __delitem__(self, name): 546 try: 547 del self.form[name] 548 except TypeError: 549 self._write_enable() 550 del self.form[name] 551 552 def _write_enable(self): 553 self.form = self.request.values = MultiDict(self.form) 554 555 def keys(self): 556 return self.form.keys() 557 558 def items(self): 559 return self.form.lists() 560 561 class ActionSupport: 562 563 """ 564 Work around disruptive MoinMoin changes in 1.9, and also provide useful 565 convenience methods. 
566 """ 567 568 def get_form(self): 569 return get_form(self.request) 570 571 def _get_selected(self, value, input_value): 572 573 """ 574 Return the HTML attribute text indicating selection of an option (or 575 otherwise) if 'value' matches 'input_value'. 576 """ 577 578 return input_value is not None and value == input_value and 'selected="selected"' or '' 579 580 def _get_selected_for_list(self, value, input_values): 581 582 """ 583 Return the HTML attribute text indicating selection of an option (or 584 otherwise) if 'value' matches one of the 'input_values'. 585 """ 586 587 return value in input_values and 'selected="selected"' or '' 588 589 def get_option_list(self, value, values): 590 591 """ 592 Return a list of HTML element definitions for options describing the 593 given 'values', selecting the option with the specified 'value' if 594 present. 595 """ 596 597 options = [] 598 for available_value in values: 599 selected = self._get_selected(available_value, value) 600 options.append('<option value="%s" %s>%s</option>' % ( 601 escattr(available_value), selected, wikiutil.escape(available_value))) 602 return options 603 604 def _get_input(self, form, name, default=None): 605 606 """ 607 Return the input from 'form' having the given 'name', returning either 608 the input converted to an integer or the given 'default' (optional, None 609 if not specified). 610 """ 611 612 value = form.get(name, [None])[0] 613 if not value: # true if 0 obtained 614 return default 615 else: 616 return int(value) 617 618 def get_form(request): 619 620 "Work around disruptive MoinMoin changes in 1.9." 621 622 if hasattr(request, "values"): 623 return Form(request) 624 else: 625 return request.form 626 627 class send_headers_cls: 628 629 """ 630 A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a 631 1.9.x environment. 
632 """ 633 634 def __init__(self, request): 635 self.request = request 636 637 def __call__(self, headers): 638 for header in headers: 639 parts = header.split(":") 640 self.request.headers.add(parts[0], ":".join(parts[1:])) 641 642 def get_send_headers(request): 643 644 "Return a function that can send response headers." 645 646 if hasattr(request, "http_headers"): 647 return request.http_headers 648 elif hasattr(request, "emit_http_headers"): 649 return request.emit_http_headers 650 else: 651 return send_headers_cls(request) 652 653 def escattr(s): 654 return wikiutil.escape(s, 1) 655 656 def getPathInfo(request): 657 if hasattr(request, "getPathinfo"): 658 return request.getPathinfo() 659 else: 660 return request.path 661 662 def getHeader(request, header_name, prefix=None): 663 664 """ 665 Using the 'request', return the value of the header with the given 666 'header_name', using the optional 'prefix' to obtain protocol-specific 667 headers if necessary. 668 669 If no value is found for the given 'header_name', None is returned. 670 """ 671 672 if hasattr(request, "getHeader"): 673 return request.getHeader(header_name) 674 elif hasattr(request, "headers"): 675 return request.headers.get(header_name) 676 elif hasattr(request, "env"): 677 return request.env.get((prefix and prefix + "_" or "") + header_name.upper()) 678 else: 679 return None 680 681 def writeHeaders(request, mimetype, metadata, status=None): 682 683 """ 684 Using the 'request', write resource headers using the given 'mimetype', 685 based on the given 'metadata'. If the optional 'status' is specified, set 686 the status header to the given value. 687 """ 688 689 send_headers = get_send_headers(request) 690 691 # Define headers. 692 693 headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)] 694 695 # Define the last modified time. 696 # NOTE: Consider using request.httpDate. 
697 698 latest_timestamp = metadata.get("last-modified") 699 if latest_timestamp: 700 headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string()) 701 702 if status: 703 headers.append("Status: %s" % status) 704 705 send_headers(headers) 706 707 # Page access functions. 708 709 def getPageURL(page): 710 711 "Return the URL of the given 'page'." 712 713 request = page.request 714 return request.getQualifiedURL(page.url(request, relative=0)) 715 716 def getFormat(page): 717 718 "Get the format used on the given 'page'." 719 720 return page.pi["format"] 721 722 def getMetadata(page): 723 724 """ 725 Return a dictionary containing items describing for the given 'page' the 726 page's "created" time, "last-modified" time, "sequence" (or revision number) 727 and the "last-comment" made about the last edit. 728 """ 729 730 request = page.request 731 732 # Get the initial revision of the page. 733 734 revisions = page.getRevList() 735 736 if not revisions: 737 return {} 738 739 event_page_initial = Page(request, page.page_name, rev=revisions[-1]) 740 741 # Get the created and last modified times. 742 743 initial_revision = getPageRevision(event_page_initial) 744 745 metadata = {} 746 metadata["created"] = initial_revision["timestamp"] 747 latest_revision = getPageRevision(page) 748 metadata["last-modified"] = latest_revision["timestamp"] 749 metadata["sequence"] = len(revisions) - 1 750 metadata["last-comment"] = latest_revision["comment"] 751 752 return metadata 753 754 def getPageRevision(page): 755 756 "Return the revision details dictionary for the given 'page'." 757 758 # From Page.edit_info... 759 760 if hasattr(page, "editlog_entry"): 761 line = page.editlog_entry() 762 else: 763 line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x 764 765 # Similar to Page.mtime_usecs behaviour... 
766 767 if line: 768 timestamp = line.ed_time_usecs 769 mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x 770 comment = line.comment 771 else: 772 mtime = 0 773 comment = "" 774 775 # Give the time zone as UTC. 776 777 return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment} 778 779 # Page parsing and formatting of embedded content. 780 781 def getOutputTypes(request, format): 782 783 """ 784 Using the 'request' and the 'format' of a fragment, return the media types 785 available for the fragment. 786 """ 787 788 return getParserOutputTypes(getParserClass(request, format)) 789 790 def getParserOutputTypes(parser): 791 792 "Return the media types supported by the given 'parser'." 793 794 # This uses an extended parser API method if available. 795 796 if parser and hasattr(parser, "getOutputTypes"): 797 return parser.getOutputTypes() 798 else: 799 return ["text/html"] 800 801 def getPageParserClass(request): 802 803 "Using 'request', return a parser class for the current page's format." 804 805 return getParserClass(request, getFormat(request.page)) 806 807 def getParserClass(request, format): 808 809 """ 810 Return a parser class using the 'request' for the given 'format', returning 811 a plain text parser if no parser can be found for the specified 'format'. 812 """ 813 814 try: 815 return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain") 816 except wikiutil.PluginMissingError: 817 return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain") 818 819 def getFormatterClass(request, format): 820 821 """ 822 Return a formatter class using the 'request' for the given output 'format', 823 returning a plain text formatter if no formatter can be found for the 824 specified 'format'. 
825 """ 826 827 try: 828 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain") 829 except wikiutil.PluginMissingError: 830 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain") 831 832 def formatText(text, request, fmt, inhibit_p=True, parser_cls=None): 833 834 """ 835 Format the given 'text' using the specified 'request' and formatter 'fmt'. 836 Suppress line anchors in the output, and fix lists by indicating that a 837 paragraph has already been started. 838 """ 839 840 if not parser_cls: 841 parser_cls = getPageParserClass(request) 842 parser = parser_cls(text, request, line_anchors=False) 843 844 old_fmt = request.formatter 845 request.formatter = fmt 846 try: 847 if isinstance(parser, text_moin_wiki.Parser): 848 return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p) 849 else: 850 return redirectedOutput(request, parser, fmt) 851 finally: 852 request.formatter = old_fmt 853 854 def formatTextForOutputType(text, request, parser_cls, output_type): 855 856 """ 857 Format the given 'text' using the specified 'request' and parser class 858 'parser_cls', producing output of the given 'output_type'. 859 """ 860 861 parser = parser_cls(text, request) 862 buf = codecs.getwriter("utf-8")(StringIO()) 863 try: 864 parser.formatForOutputType(output_type, buf.write) 865 return unicode(buf.getvalue(), "utf-8") 866 finally: 867 buf.close() 868 869 def redirectedOutput(request, parser, fmt, **kw): 870 871 "A fixed version of the request method of the same name." 872 873 buf = codecs.getwriter("utf-8")(StringIO()) 874 request.redirect(buf) 875 try: 876 parser.format(fmt, **kw) 877 if hasattr(fmt, "flush"): 878 buf.write(fmt.flush(True)) 879 finally: 880 request.redirect() 881 text = buf.getvalue() 882 buf.close() 883 return unicode(text, "utf-8") 884 885 # Finding components for content types. 
def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    available.setdefault(input_mimetype, []).append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "/" separator below may have been mangled in transit
    # NOTE(review): (possibly originally a non-ASCII character) - verify
    # NOTE(review): against the upstream source before changing it.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter, falling back on the HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# Content storage support.

class ItemStore(ItemDirectoryStore):

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        "Initialise an item store for the given 'page'."

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    def can_delete(self):

        """
        Return whether the user associated with the request can delete the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.delete(self.page.page_name)

    # High-level methods.

    def append(self, item):

        "Append the given 'item' to the store."

        if not self.can_write():
            return

        ItemDirectoryStore.append(self, item)

    def __len__(self):

        "Return the number of items in the store."

        if not self.can_read():
            return 0

        return ItemDirectoryStore.__len__(self)

    def __getitem__(self, number):

        "Return the item with the given 'number'."

        if not self.can_read():
            raise IndexError(number)

        return ItemDirectoryStore.__getitem__(self, number)

    def __delitem__(self, number):

        "Remove the item with the given 'number'."

        if not self.can_delete():
            return

        return ItemDirectoryStore.__delitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4