# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

@copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
@copyright: 2000-2004 Juergen Hermann <jh@web.de>,
            2005-2008 MoinMoin:ThomasWaldmann.
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.
# Matches a media type token, optionally followed by a "charset" parameter
# (for example "text/html; charset=utf-8").

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.
# Used to split an Accept header field at the quality ("q") parameter,
# separating the media range from any accept-extension parameters.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.
# Matches runs of three or more opening or closing braces acting as region
# markers in page text.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.
# Matches Moin-style headings: a line delimited by equal numbers of "="
# characters at both ends.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.
# Lazily initialised by getCategoryPattern when the wiki configuration does
# not supply its own compiled pattern.

category_regexp = None

# Simple content parsing.
# NOTE(review): the "verbatim3" alternative ends with an optional (\s|$)?
# group, so the non-greedy body can match the empty string immediately after
# "!" - confirm this is the intended linking-avoidance behaviour.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return a compiled pattern matching category page names, preferring any
    pattern exposed by the wiki configuration cache of the given 'request'
    and otherwise falling back on a module-level default.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:

        # "key" is the category name without the "Category" prefix; assumes
        # every supplied page name matches the category pattern.

        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:

        # Exclude the category pages themselves from the results.

        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request', or None if the page does not exist or may not be read.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):

        # MoinMoin 1.9 exposes dicts via a method; earlier versions support
        # mapping-style access.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None

    # marker_regexp.split alternates between plain text and marker tokens;
    # is_block tracks which kind the current fragment is.

    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # A section only closes on a "}" marker of exactly the same
                # length as the marker that opened it.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # The marker level is the number of leading "{" characters.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:

                # A single-line region: the whole body is the declaration.

                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:

                # The previous name had no value: record it as a flag.
                # NOTE(review): unlike the branches above and below, this
                # name capture always escapes and never honours
                # escape=False - confirm whether that is intentional.

                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.
class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):

        # Coerce the (possibly empty) value list to a boolean.

        return not not self.form.getlist(name)

    def get(self, name, default=None):

        # Note that the whole list of values is returned, not a single value.

        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def items(self):
        return self.form.items(True)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # missing or empty input yields the default
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    # MoinMoin 1.9 requests expose werkzeug-style "values"; wrap them to
    # retain the older form interface.

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        # Each header is a "Name: value" string; rejoining preserves any
        # colons occurring within the value itself.

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path info for the given 'request' across MoinMoin versions."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:

        # Fall back on CGI-style environment variables, e.g. HTTP_ACCEPT.

        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):

        """
        Initialise with the given 'media_range' string (such as "text/*" or
        "text/html;level=1") and optional 'accept_parameters' mapping holding
        accept-extension parameters such as "q".
        """

        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        # Wildcards yield a negative specificity; otherwise the number of
        # media type parameters is used.

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:

            # For fully-specific matches, any parameters must also agree.

            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match. A wildcard ("*") on either side matches.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:

        # Split on the first "=" only, preserving any "=" in the value.

        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:

            # Reinstate the "q=" prefix removed by the split.

            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):

        # 'preferences' is a list of MediaRange objects.

        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the supplied formatter on the request, restoring
    # the previous one afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.
    # Unmatched groups yield None from split and must be discarded.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the u" ? " separator may be a mis-encoded non-ASCII
    # character from the original iso-8859-1 source - verify against the
    # upstream file.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter, falling back on the request's
    # HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4