# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

@copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
@copyright: 2000-2004 Juergen Hermann <jh@web.de>,
            2005-2008 MoinMoin:ThomasWaldmann.
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

# NOTE: This module targets Python 2 and the MoinMoin 1.x APIs (StringIO,
# NOTE: ur'' literals, dict.has_key), supporting several MoinMoin versions.

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return a compiled pattern which matches category page names, preferring
    any pattern already cached on the configuration of the given 'request'.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. None is returned when the page does not exist or may not be
    read by the requesting user.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):

        # MoinMoin 1.9 provides a dict method; earlier versions support
        # subscripting of the dicts collection.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        elif not is_block:

            # Close any open sections, returning to exposed text regions.

            if marker:
                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a section if an appropriate marker
            # is given.

            elif match_text.startswith("{"):
                marker = match_text
                regions.append("")

            # Markers and section text are added to the current region.

            regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, arguments, body text).
    """

    fragments = []

    for region in regions:
        if region.startswith("{{{"):

            body = region.lstrip("{").rstrip("}").lstrip()

            # Remove any prelude and process metadata.

            if body.startswith("#!"):
                body = body[2:]

                arguments, body = body.split("\n", 1)

                # Get any parser/format declaration.

                if arguments and not arguments[0].isspace():
                    details = arguments.split(None, 1)
                    if len(details) == 2:
                        format, arguments = details
                    else:
                        format = details[0]
                        arguments = ""
                else:
                    format = None

                # Get the attributes/arguments for the region.

                attributes = parseAttributes(arguments, False)

                # Add an entry for the format in the attribute dictionary.

                if format and not attributes.has_key(format):
                    attributes[format] = True

                fragments.append((format, attributes, body))

            else:
                fragments.append((None, {}, body))

        else:
            fragments.append((None, {}, region))

    return fragments

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                # The previous name was given without a value: record it as a
                # boolean-style attribute and start a new name.
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        # Coerced to a boolean: true only if at least one value was supplied.
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        # Returns the list of values, or 'default' when no values exist.
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):

        "Return a form object for the request associated with this action."

        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        # Split each "Name: value" string and add it to the request's headers,
        # rejoining any colons occurring within the value itself.
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' using wikiutil.escape with quote escaping enabled."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path details of the given 'request', supporting both APIs."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):
        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        # Separate the media type from any qualifying parameters.

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Substitute the given formatter for the duration of the operation,
    # restoring the original formatter afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.
def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    parts = verbatim_regexp.split(text)
    return "".join(part for part in parts if part is not None)

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    spaced = title.replace("_", " ")
    return spaced.replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    values = get_form(request).get(name, [default])
    return values[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    qualified = getQualifiedParameterName(prefix, argname)
    return getParameter(request, qualified, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if prefix:
        return "%s-%s" % (prefix, argname)
    else:
        return argname

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    return getPrettyTitle(page.split_title(force=1))

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    escaped = wikiutil.escape(text)
    return page.link_to_raw(request, escaped, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        url += "?%s" % wikiutil.makeQueryString(query_string)

    # Use the current page's formatter where available, falling back on the
    # request's plain HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    return "".join([
        formatter.url(1, url),
        formatter.text(text),
        formatter.url(0),
        ])

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if not parent:
        return title
    return "%s/%s" % (parent.rstrip("/"), title)

# vim: tabstop=4 expandtab shiftwidth=4