# -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

@copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
@copyright: 2000-2004 Juergen Hermann <jh@web.de>,
            2005-2008 MoinMoin:ThomasWaldmann.
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.
# Splits a Content-Type header value into the media type and an optional
# "charset" parameter (see getContentTypeAndEncoding).

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.
# Separates a media range from its quality ("q") and extension parameters
# (see getContentPreferences).

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.
# Matches runs of three or more braces acting as region markers.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Category extraction from pages.
# Lazily-initialised fallback pattern used when the wiki configuration does
# not provide one (see getCategoryPattern).

category_regexp = None

# Simple content parsing.
# Recognises the Wiki constructs that protect text from interpretation
# (Verbatim macros, "!" escapes, monospaced and preformatted spans); used by
# getSimpleWikiText to recover the plain text.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return a compiled pattern matching category page names, using the given
    'request'. The pattern provides "all" (the whole name) and "key" (the
    name following the "Category" prefix) groups.
    """

    global category_regexp

    try:
        # Prefer the pattern derived from the wiki's own configuration.

        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a sorted list of tuples of the
    form (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")

        # Omit the bare "Category" page itself.

        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:

        # Exclude category pages themselves from the result.

        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'. Pages appearing in several
    categories are returned only once.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

# WikiDict functions.
def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request', or None if the page does not exist or is not readable by the
    current user.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):

        # MoinMoin 1.9 exposes dicts via a method; earlier releases provide a
        # mapping interface.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None       # the opening marker of the currently open section
    is_block = True     # split() alternates: text, marker, text, marker, ...

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        elif not is_block:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # A closing marker must use the same number of braces as the
                # marker which opened the section.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a section if an appropriate marker
            # is given.

            elif match_text.startswith("{"):
                marker = match_text
                regions.append("")

            # Markers and section text are added to the current region.
            # NOTE(review): when include_non_regions is set, a closing marker is
            # appended to the newly started exposed region rather than to the
            # section it closes - confirm this is the intended behaviour.

            regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, arguments, body text).
    """

    fragments = []

    for region in regions:
        if region.startswith("{{{"):

            body = region.lstrip("{").rstrip("}").lstrip()

            # Remove any prelude and process metadata.

            if body.startswith("#!"):
                body = body[2:]

                # The first line holds the parser/format declaration and any
                # arguments; the rest is the body proper.

                arguments, body = body.split("\n", 1)

                # Get any parser/format declaration.

                if arguments and not arguments[0].isspace():
                    details = arguments.split(None, 1)
                    if len(details) == 2:
                        format, arguments = details
                    else:
                        format = details[0]
                        arguments = ""
                else:
                    format = None

                # Get the attributes/arguments for the region.

                attributes = parseAttributes(arguments, False)

                # Add an entry for the format in the attribute dictionary.

                if format and not attributes.has_key(format):
                    attributes[format] = True

                fragments.append((format, attributes, body))

            else:
                fragments.append((None, {}, body))

        else:
            fragments.append((None, {}, region))

    return fragments

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                # The previous name had no value: record it as a boolean flag.

                attrs[name.lower()] = escape and "true" or True

                # NOTE(review): unlike the first name capture above, this does
                # not honour escape=False - confirm whether strip_token was
                # intended here.

                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment. Values are always exposed as lists, as the older form API
    did.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods. Intended as a mixin for action classes providing a
    'request' attribute.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    # MoinMoin 1.9 provides request values via Werkzeug; wrap them to restore
    # the older list-producing form interface.

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        # Each header is a "Name: value" string; split on the first colon and
        # re-join the remainder in case the value itself contains colons.

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    # Older MoinMoin releases provide header-sending methods directly;
    # otherwise, wrap the 1.9.x headers API.

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Return 's' escaped for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path information for the given 'request'."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        # Fall back to the request environment, using any 'prefix' (such as
        # "HTTP") plus the upper-cased header name.

        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):

        """
        Initialise the object from the given 'media_range' string (such as
        "text/html;level=1") and any 'accept_parameters' (such as quality
        values) applying to it.
        """

        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        # Wildcards reduce specificity below zero; otherwise, each qualifying
        # parameter increases it.

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            # For exact type matches, any parameters defined here must also be
            # matched; a wildcard match ignores parameters.

            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match. A wildcard on either side matches.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.
def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value. Either element may be None if absent.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit. An empty dictionary is
    returned for a page without revisions.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started. The request's formatter is restored
    afterwards.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Substitute the supplied formatter while parsing, restoring the original
    # formatter even if parsing fails.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    # Capture output produced while formatting, ensuring that redirection is
    # removed even if formatting fails.

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "/" separator below may have been mangled by a
    # character encoding conversion; confirm against the original source
    # (this file declares iso-8859-1) before relying on it.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    Without a 'prefix', the plain 'argname' is returned.
    """

    if prefix is None:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Use the current page's formatter where available, falling back to the
    # request's HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4