1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013, 2014 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de> 7 2004,2006 by Florian Festi 8 2006 by Mikko Virkkil 9 2005-2008 MoinMoin:ThomasWaldmann 10 2007 MoinMoin:ReimarBauer 11 2008 MoinMoin:FlorianKrupicka (redirectedOutput code) 12 @license: GNU GPL (v2 or later), see COPYING.txt for details. 13 """ 14 15 from DateSupport import * 16 from MoinMoin.parser import text_moin_wiki 17 from MoinMoin.Page import Page 18 from MoinMoin.support.python_compatibility import hash_new 19 from MoinMoin import caching, config, search, wikiutil 20 from shlex import shlex 21 import re 22 import time 23 import os 24 import codecs 25 26 try: 27 from cStringIO import StringIO 28 except ImportError: 29 from StringIO import StringIO 30 31 # Moin 1.9 request parameters. 32 33 try: 34 from MoinMoin.support.werkzeug.datastructures import MultiDict 35 except ImportError: 36 pass 37 38 __version__ = "0.5" 39 40 # Extraction of shared fragments. 41 42 marker_regexp_str = r"([{]{3,}|[}]{3,})" 43 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 44 45 # Extraction of headings. 46 47 heading_regexp_str = r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$" 48 heading_regexp = re.compile(heading_regexp_str, re.UNICODE | re.MULTILINE) 49 50 # Category extraction from pages. 51 52 category_regexp = None 53 54 # Simple content parsing. 55 56 verbatim_regexp = re.compile(ur'(?:' 57 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 58 ur'|' 59 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 60 ur'|' 61 ur'!(?P<verbatim3>.*?)(\s|$)?' 62 ur'|' 63 ur'`(?P<monospace>.*?)`' 64 ur'|' 65 ur'{{{(?P<preformatted>.*?)}}}' 66 ur')', re.UNICODE) 67 68 # Category discovery. 
def getCategoryPattern(request):

    """
    Return a compiled pattern matching category page names, preferring the
    pattern cached on the wiki configuration by newer MoinMoin versions and
    otherwise compiling and caching a module-level fallback pattern.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")

        # Exclude the generic "Category" page itself.

        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:

        # Retain only pages whose names are not themselves category names.

        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request, superuser=False):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. If the optional 'superuser' is specified as a true value, no read
    access check will be made.
    """

    if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)):

        # Moin 1.9 provides a dicts object with a dict method; earlier
        # versions expose a mapping interface.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

def groupHasMember(request, groupname, username):

    """
    Return whether 'username' belongs to the group having the given
    'groupname', using whichever membership API the running MoinMoin version
    provides via the 'request'.
    """

    if hasattr(request.dicts, "has_member"):
        return request.dicts.has_member(groupname, username)
    else:
        return username in request.groups.get(groupname, [])

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # A closing marker must have the same length as the opener.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # Measure the nesting level from the number of opening braces.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.

    Because Unicode was probably not around when shlex, used here to tokenise
    the attributes, was introduced, and since StringIO is not Unicode-capable,
    any non-ASCII characters should be quoted in attributes.
    """

    attrs = {}
    f = StringIO(s.encode("utf-8"))
    name = None
    need_value = False
    lex = shlex(f)
    lex.wordchars += "-"

    for token in lex:
        token = unicode(token, "utf-8")

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:

                # The previous name had no value: record it as a boolean
                # attribute and start capturing the new name.
                # NOTE(review): unlike the initial capture above, this always
                # NOTE(review): escapes the new name even when 'escape' is
                # NOTE(review): false - verify that this is intended.

                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    # NOTE(review): raises IndexError for an empty token; callers in this
    # NOTE(review): module only supply non-empty shlex tokens.

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

      "label=No, thanks!"
    """

    # Prefer the quoting-aware splitter; older MoinMoin versions lack it
    # (AttributeError) and get a plain comma split instead.

    try:
        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:
        parsed_args = args.split(",")

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs

def parseDictEntry(entry, unqualified=None):

    """
    Return the parameters specified by the given dict 'entry' string. The
    optional 'unqualified' parameter can be used to indicate parameters that
    need not be specified together with a keyword and can therefore be populated
    in the given order as such unqualified parameters are encountered.

    NOTE: This is similar to parseMacroArguments but employs space as a
    NOTE: separator and attempts to assign unqualified parameters.
    """

    parameters = {}
    unqualified = unqualified or ()

    try:
        parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or []
    except AttributeError:
        parsed_args = entry.split()

    for arg in parsed_args:
        try:
            argname, argvalue = arg.split("=", 1)

            # Detect unlikely parameter names.

            if not argname.isalpha():
                raise ValueError

            parameters[argname] = argvalue

        # Unqualified parameters are assumed to be one of a recognised set.

        except ValueError:
            for argname in unqualified:
                if not parameters.has_key(argname):
                    parameters[argname] = arg
                    break

    return parameters

# Macro argument quoting.

def quoteMacroArguments(args):

    """
    Quote the given 'args' - a collection of (name, value) tuples - returning a
    string containing the comma-separated, quoted arguments.
    """

    quoted = []

    for name, value in args:
        quoted.append(quoteMacroArgument(name, value))

    return ",".join(quoted)

def quoteMacroArgument(name, value):

    """
    Quote the argument with the given 'name' (or None indicating an unnamed
    argument) and 'value' so that it can be used with a macro.
    """

    # Double any quotes so that the value survives macro argument parsing.

    value = unicode(value).replace('"', '""')
    if name is None:
        return '"%s"' % value
    else:
        return '"%s=%s"' % (name, value)

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):

        # A name is considered present only if it has at least one value.

        return not not self.form.getlist(name)

    def get(self, name, default=None):

        # Return the list of values, or the given default unwrapped, matching
        # the 1.8.x form behaviour.

        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):

        # The underlying structure may be immutable: switch to a mutable copy
        # on demand.

        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):

        # Replace the request's parameters with a mutable copy.

        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        # Split each "Name: value" string, preserving any colons occurring in
        # the value itself.

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path information for the given 'request'."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    elif hasattr(request, "env"):
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())
    else:
        return None

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Give the time zone as UTC.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment}

# Page caching functions.

def getPageCacheKey(page, request):

    """
    Return a cache key for the given 'page' using information in the 'request'.
    """

    # Delegate to the page's own method where the running MoinMoin version
    # provides one.

    if hasattr(page, "getCacheKey"):
        return page.getCacheKey(request)

    key = getPageFormatterName(page, request)
    if request.args:

        # Sort the arguments for a stable key, hashing the encoded collection.

        args = request.args.items()
        args.sort()
        key_args = []
        for k, v in args:
            key_args.append("%s=%s" % (k, wikiutil.url_quote(v)))
        arg_str = "&".join(key_args)
        key = "%s:%s" % (key, hash_new('sha1', arg_str).hexdigest())
    return key

def enforcePageCacheLimit(page, request):

    """
    Prevent too many cache entries being stored for the given 'page', using the
    'request' to obtain cache items and configuration details.
    """

    # NOTE(review): unlike getPageCacheKey, this does not return after
    # NOTE(review): delegating to the page's own enforceCacheLimit method, so
    # NOTE(review): the fallback logic below also runs - verify intended.

    if hasattr(page, "enforceCacheLimit"):
        page.enforceCacheLimit(request)

    keys = caching.get_cache_list(request, page, 'item')
    try:
        cache_limit = int(getattr(request.cfg, 'page_cache_limit', "10"))
    except ValueError:
        cache_limit = 10

    # Remove the oldest entries beyond the limit.

    if len(keys) >= cache_limit:
        items = [caching.CacheEntry(request, page, key, scope='item') for key in keys]
        item_ages = [(item.mtime(), item) for item in items]
        item_ages.sort()
        for item_age, item in item_ages[:-cache_limit]:
            item.remove()

def getPageFormatterName(page, request=None):

    """
    Return a formatter name as used in the caching system for the given 'page'
    or using information provided by an optional 'request'.
    """

    formatter = getattr(page, 'formatter', None) or request and getattr(request, 'formatter', None)
    if not formatter:
        return ''
    module = formatter.__module__
    return module[module.rfind('.') + 1:]

# Page parsing and formatting of embedded content.

def getOutputTypes(request, format):

    """
    Using the 'request' and the 'format' of a fragment, return the media types
    available for the fragment.
    """

    return getParserOutputTypes(getParserClass(request, format))

def getParserOutputTypes(parser):

    "Return the media types supported by the given 'parser'."

    # This uses an extended parser API method if available.

    if parser and hasattr(parser, "getOutputTypes"):
        return parser.getOutputTypes()
    else:
        return ["text/html"]

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the given formatter on the request, restoring the
    # previous formatter afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        if isinstance(parser, text_moin_wiki.Parser):
            return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
        else:
            return redirectedOutput(request, parser, fmt)
    finally:
        request.formatter = old_fmt

def formatTextForOutputType(text, request, parser_cls, output_type):

    """
    Format the given 'text' using the specified 'request' and parser class
    'parser_cls', producing output of the given 'output_type'.
    """

    parser = parser_cls(text, request)
    buf = codecs.getwriter("utf-8")(StringIO())
    try:
        parser.formatForOutputType(output_type, buf.write)
        return unicode(buf.getvalue(), "utf-8")
    finally:
        buf.close()

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = codecs.getwriter("utf-8")(StringIO())
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)

        # Let formatters with buffered output flush it into the result.

        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return unicode(text, "utf-8")

class RawParser:

    "A parser that just formats everything as text."

    def __init__(self, raw, request, **kw):
        self.raw = raw
        self.request = request

    def format(self, fmt, write=None):

        # Emit the raw text via the formatter, using the supplied 'write'
        # function or the request's own write method.

        (write or self.request.write)(fmt.text(self.raw))

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
997 """ 998 999 if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"): 1000 available = {} 1001 1002 for name in wikiutil.getPlugins("parser", cfg): 1003 1004 # Import each parser in order to inspect supported content types. 1005 1006 try: 1007 parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser") 1008 except wikiutil.PluginMissingError: 1009 continue 1010 1011 # Attempt to determine supported content types. 1012 # NOTE: Extensions and /etc/mime.types (or equivalent) could also be 1013 # NOTE: used. 1014 1015 if hasattr(parser_cls, "input_mimetypes"): 1016 for input_mimetype in parser_cls.input_mimetypes: 1017 if not available.has_key(input_mimetype): 1018 available[input_mimetype] = [] 1019 available[input_mimetype].append(parser_cls) 1020 1021 # Support some basic parsers. 1022 1023 elif name == "text_moin_wiki": 1024 available["text/moin-wiki"] = [parser_cls] 1025 available["text/moin"] = [parser_cls] 1026 elif name == "text_html": 1027 available["text/html"] = [parser_cls] 1028 available["application/xhtml+xml"] = [parser_cls] 1029 1030 cfg.cache.MIMETYPE_TO_PARSER = available 1031 1032 return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, []) 1033 1034 # Textual representations. 1035 1036 def getSimpleWikiText(text): 1037 1038 """ 1039 Return the plain text representation of the given 'text' which may employ 1040 certain Wiki syntax features, such as those providing verbatim or monospaced 1041 text. 1042 """ 1043 1044 # NOTE: Re-implementing support for verbatim text and linking avoidance. 1045 1046 l = [] 1047 last = 0 1048 1049 for m in verbatim_regexp.finditer(text): 1050 start, end = m.span() 1051 l.append(text[last:start]) 1052 1053 # Process the verbatim macro arguments. 1054 1055 args = m.group("verbatim") or m.group("verbatim2") 1056 if args: 1057 l += [v for (n, v) in parseMacroArguments(args)] 1058 1059 # Or just add the match groups. 
1060 1061 else: 1062 l += [s for s in m.groups() if s] 1063 1064 last = end 1065 1066 l.append(text[last:]) 1067 return "".join(l) 1068 1069 def getEncodedWikiText(text): 1070 1071 "Encode the given 'text' in a verbatim representation." 1072 1073 return "<<Verbatim(%s)>>" % quoteMacroArgument(None, text) 1074 1075 def getPrettyTitle(title): 1076 1077 "Return a nicely formatted version of the given 'title'." 1078 1079 return title.replace("_", " ").replace("/", u" ? ") 1080 1081 # User interface functions. 1082 1083 def getParameter(request, name, default=None): 1084 1085 """ 1086 Using the given 'request', return the value of the parameter with the given 1087 'name', returning the optional 'default' (or None) if no value was supplied 1088 in the 'request'. 1089 """ 1090 1091 return get_form(request).get(name, [default])[0] 1092 1093 def getQualifiedParameter(request, prefix, argname, default=None): 1094 1095 """ 1096 Using the given 'request', 'prefix' and 'argname', retrieve the value of the 1097 qualified parameter, returning the optional 'default' (or None) if no value 1098 was supplied in the 'request'. 1099 """ 1100 1101 argname = getQualifiedParameterName(prefix, argname) 1102 return getParameter(request, argname, default) 1103 1104 def getQualifiedParameterName(prefix, argname): 1105 1106 """ 1107 Return the qualified parameter name using the given 'prefix' and 'argname'. 1108 """ 1109 1110 if not prefix: 1111 return argname 1112 else: 1113 return "%s-%s" % (prefix, argname) 1114 1115 # Page-related functions. 1116 1117 def getPrettyPageName(page): 1118 1119 "Return a nicely formatted title/name for the given 'page'." 1120 1121 title = page.split_title(force=1) 1122 return getPrettyTitle(title) 1123 1124 def linkToPage(request, page, text, query_string=None, anchor=None, **kw): 1125 1126 """ 1127 Using 'request', return a link to 'page' with the given link 'text' and 1128 optional 'query_string' and 'anchor'. 
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Use the current page's formatter where available, falling back to the
    # request's HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4