# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

@copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
@copyright: 2000-2004 Juergen Hermann <jh@web.de>,
            2005-2008 MoinMoin:ThomasWaldmann.
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

# NOTE: This module is Python 2 code (ur'' literals, StringIO, has_key) and
# targets several MoinMoin releases at once; many helpers below probe for
# version-specific APIs before falling back to older behaviour.

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.
# Splits a Content-Type header value into the type itself and an optional
# charset parameter.

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.
# Used to split a media-range from its quality ("q=") parameter.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.
# Matches runs of three or more braces which open or close Wiki regions.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.
# A heading line is delimited by equal numbers of "=" characters on each side.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.
# Lazily initialised fallback pattern; see getCategoryPattern below.

category_regexp = None

# Simple content parsing.
# Recognises the various verbatim/monospace/preformatted Wiki constructs so
# that getSimpleWikiText can strip their markup.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return the compiled category name pattern, preferring the pattern cached
    on the 'request' configuration (newer MoinMoin) and falling back to a
    module-level compiled default.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        # Exclude the bare "Category" page itself from the mapping.
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        # Keep only pages whose names are not themselves category pages.
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. Returns None if the page does not exist or may not be read by
    the current user.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        # Newer MoinMoin exposes dicts via a method; older versions use a
        # mapping interface.
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # A closing marker must use the same number of braces as the
                # opening marker to end the section.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # Measure the marker length by stripping the opening braces.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                # Declaration only, no body text follows.
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False
    lex = shlex(f)
    # Allow hyphenated attribute names to be treated as single tokens.
    lex.wordchars += "-"

    for token in lex:

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                # Previous name had no value: record it as a boolean flag.
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.
class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment. Wraps a 1.9.x form object exposing getlist, presenting the
    older mapping-of-lists interface.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __delitem__(self, name):
        del self.form[name]

    def keys(self):
        return self.form.keys()

    def items(self):
        # NOTE(review): the True argument presumably requests multi-value
        # items from the underlying 1.9.x form object - confirm against the
        # wrapped class.
        return self.form.items(True)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods. Expects a 'request' attribute to be provided by the
    subclass.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment. Callable with a list of "Name: value" header strings.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            # Split on the first colon only; rejoin any colons in the value.
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path info for the given 'request', across MoinMoin versions."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        # Fall back to CGI-style environment variables, e.g. HTTP_ACCEPT.
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):

        """
        Initialise from the 'media_range' string (for example
        "text/html;level=1") and any 'accept_parameters' mapping containing
        accept-extension parameters such as "q".
        """

        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters. Wildcard parts yield negative values so that they rank
        below fully-specified types.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            # Fully-specified ranges must also agree on their parameters.
            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        # Split on the first "=" only; rejoin any "=" in the value.
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            # Reinstate the "q=" prefix removed by the split.
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                # Absent "q" defaults to 1 per the HTTP specification.
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install 'fmt' as the request's formatter, restoring the
    # previous one afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        # Some formatters buffer output and need flushing explicitly.
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "?" separator below looks like a mis-encoded
    # character (possibly a right-pointing angle quotation mark in the
    # original iso-8859-1 source) - confirm against upstream before changing.
    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter where available.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4