# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

@copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk>
@copyright: 2000-2004 Juergen Hermann <jh@web.de>
            2004 by Florian Festi
            2006 by Mikko Virkkil
            2005-2008 MoinMoin:ThomasWaldmann
            2007 MoinMoin:ReimarBauer
            2008 MoinMoin:FlorianKrupicka (redirectedOutput code)
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from ItemSupport import ItemDirectoryStore
from MoinMoin.parser import text_moin_wiki
from MoinMoin.Page import Page
from MoinMoin.util import lock
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time
import os

# Moin 1.9 request parameters.
# MultiDict is only available in 1.9.x environments; its absence is tolerated
# so that this module still imports on older MoinMoin versions.

try:
    from MoinMoin.support.werkzeug.datastructures import MultiDict
except ImportError:
    pass

__version__ = "0.3"

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return a compiled regular expression matching category page names, using
    the site's configured pattern from the given 'request' where available.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.
        # The compiled pattern is cached in the module-level category_regexp.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        # Exclude the bare "Category" page itself from the mapping.
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request, superuser=False):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. If the optional 'superuser' is specified as a true value, no read
    access check will be made.
    """

    if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)):
        # MoinMoin 1.9 exposes dicts via a method; older versions via mapping.
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # Measure the opening marker length by stripping the braces.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False
    lex = shlex(f)
    lex.wordchars += "-"

    for token in lex:

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                # The previous name had no value: record it as a boolean flag.
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

    "label=No, thanks!"
    """

    # parse_quoted_separated is unavailable in older MoinMoin versions, in
    # which case a plain comma split is performed instead.

    try:
        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:
        parsed_args = args.split(",")

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs

def parseDictEntry(entry, unqualified=None):

    """
    Return the parameters specified by the given dict 'entry' string. The
    optional 'unqualified' parameter can be used to indicate parameters that
    need not be specified together with a keyword and can therefore be populated
    in the given order as such unqualified parameters are encountered.

    NOTE: This is similar to parseMacroArguments but employs space as a
    NOTE: separator and attempts to assign unqualified parameters.
    """

    parameters = {}
    unqualified = unqualified or ()

    try:
        parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or []
    except AttributeError:
        parsed_args = entry.split()

    for arg in parsed_args:
        try:
            argname, argvalue = arg.split("=", 1)

            # Detect unlikely parameter names.

            if not argname.isalpha():
                raise ValueError

            parameters[argname] = argvalue

        # Unqualified parameters are assumed to be one of a recognised set.

        except ValueError:
            for argname in unqualified:
                if not parameters.has_key(argname):
                    parameters[argname] = arg
                    break

    return parameters

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):
        # The underlying MultiDict may be immutable, raising TypeError.
        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):
        # Replace the request's values with a mutable copy before modifying.
        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        # Split each "Name: value" string and add it to the response headers.
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Return 's' escaped for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path details of the 'request' using whichever API it provides."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    elif hasattr(request, "env"):
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())
    else:
        return None

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Give the time zone as UTC.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getOutputTypes(request, format):

    """
    Using the 'request' and the 'format' of a fragment, return the media types
    available for the fragment.
    """

    return getParserOutputTypes(getParserClass(request, format))

def getParserOutputTypes(parser):

    "Return the media types supported by the given 'parser'."

    # This uses an extended parser API method if available.

    if parser and hasattr(parser, "getOutputTypes"):
        return parser.getOutputTypes()
    else:
        return ["text/html"]

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the given formatter, restoring the original one
    # afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        # Only the wiki parser supports the inhibit_p keyword argument.
        if isinstance(parser, text_moin_wiki.Parser):
            return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
        else:
            return redirectedOutput(request, parser, fmt)
    finally:
        request.formatter = old_fmt

def formatTextForOutputType(text, request, parser_cls, output_type):

    """
    Format the given 'text' using the specified 'request' and parser class
    'parser_cls', producing output of the given 'output_type'.
    """

    parser = parser_cls(text, request)
    buf = StringIO()
    try:
        parser.formatForOutputType(output_type, buf.write)
        return buf.getvalue()
    finally:
        buf.close()

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Use the current page's formatter where available, falling back to the
    # request's HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# Content storage support.

class ItemStore(ItemDirectoryStore):

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        "Initialise an item store for the given 'page'."

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    def can_delete(self):

        """
        Return whether the user associated with the request can delete the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.delete(self.page.page_name)

    # High-level methods.

    def append(self, item):

        "Append the given 'item' to the store."

        # Silently ignore the request when writing is not permitted.

        if not self.can_write():
            return

        ItemDirectoryStore.append(self, item)

    def __len__(self):

        "Return the number of items in the store."

        if not self.can_read():
            return 0

        return ItemDirectoryStore.__len__(self)

    def __getitem__(self, number):

        "Return the item with the given 'number'."

        if not self.can_read():
            raise IndexError, number

        return ItemDirectoryStore.__getitem__(self, number)

    def __delitem__(self, number):

        "Remove the item with the given 'number'."

        if not self.can_delete():
            return

        return ItemDirectoryStore.__delitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4