1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de>, 7 2004 by Florian Festi, 8 2006 by Mikko Virkkil, 9 2005-2008 MoinMoin:ThomasWaldmann, 10 2007 MoinMoin:ReimarBauer. 11 @license: GNU GPL (v2 or later), see COPYING.txt for details. 12 """ 13 14 from DateSupport import * 15 from ItemSupport import ItemDirectoryStore 16 from MoinMoin.parser import text_moin_wiki 17 from MoinMoin.Page import Page 18 from MoinMoin.util import lock 19 from MoinMoin import config, search, wikiutil 20 from StringIO import StringIO 21 from shlex import shlex 22 import re 23 import time 24 import os 25 26 # Moin 1.9 request parameters. 27 28 try: 29 from MoinMoin.support.werkzeug.datastructures import MultiDict 30 except ImportError: 31 pass 32 33 __version__ = "0.3" 34 35 # Extraction of shared fragments. 36 37 marker_regexp_str = r"([{]{3,}|[}]{3,})" 38 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 39 40 # Extraction of headings. 41 42 heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE) 43 44 # Category extraction from pages. 45 46 category_regexp = None 47 48 # Simple content parsing. 49 50 verbatim_regexp = re.compile(ur'(?:' 51 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 52 ur'|' 53 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 54 ur'|' 55 ur'!(?P<verbatim3>.*?)(\s|$)?' 56 ur'|' 57 ur'`(?P<monospace>.*?)`' 58 ur'|' 59 ur'{{{(?P<preformatted>.*?)}}}' 60 ur')', re.UNICODE) 61 62 # Category discovery. 63 64 def getCategoryPattern(request): 65 global category_regexp 66 67 try: 68 return request.cfg.cache.page_category_regexact 69 except AttributeError: 70 71 # Use regular expression from MoinMoin 1.7.1 otherwise. 
72 73 if category_regexp is None: 74 category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE) 75 return category_regexp 76 77 def getCategories(request): 78 79 """ 80 From the AdvancedSearch macro, return a list of category page names using 81 the given 'request'. 82 """ 83 84 # This will return all pages with "Category" in the title. 85 86 cat_filter = getCategoryPattern(request).search 87 return request.rootpage.getPageList(filter=cat_filter) 88 89 def getCategoryMapping(category_pagenames, request): 90 91 """ 92 For the given 'category_pagenames' return a list of tuples of the form 93 (category name, category page name) using the given 'request'. 94 """ 95 96 cat_pattern = getCategoryPattern(request) 97 mapping = [] 98 for pagename in category_pagenames: 99 name = cat_pattern.match(pagename).group("key") 100 if name != "Category": 101 mapping.append((name, pagename)) 102 mapping.sort() 103 return mapping 104 105 def getCategoryPages(pagename, request): 106 107 """ 108 Return the pages associated with the given category 'pagename' using the 109 'request'. 110 """ 111 112 query = search.QueryParser().parse_query('category:%s' % pagename) 113 results = search.searchPages(request, query, "page_name") 114 return filterCategoryPages(results, request) 115 116 def filterCategoryPages(results, request): 117 118 "Filter category pages from the given 'results' using the 'request'." 119 120 cat_pattern = getCategoryPattern(request) 121 pages = [] 122 for page in results.hits: 123 if not cat_pattern.match(page.page_name): 124 pages.append(page) 125 return pages 126 127 def getAllCategoryPages(category_names, request): 128 129 """ 130 Return all pages belonging to the categories having the given 131 'category_names', using the given 'request'. 132 """ 133 134 pages = [] 135 pagenames = set() 136 137 for category_name in category_names: 138 139 # Get the pages and page names in the category. 
140 141 pages_in_category = getCategoryPages(category_name, request) 142 143 # Visit each page in the category. 144 145 for page_in_category in pages_in_category: 146 pagename = page_in_category.page_name 147 148 # Only process each page once. 149 150 if pagename in pagenames: 151 continue 152 else: 153 pagenames.add(pagename) 154 155 pages.append(page_in_category) 156 157 return pages 158 159 def getPagesForSearch(search_pattern, request): 160 161 """ 162 Return result pages for a search employing the given 'search_pattern' and 163 using the given 'request'. 164 """ 165 166 query = search.QueryParser().parse_query(search_pattern) 167 results = search.searchPages(request, query, "page_name") 168 return filterCategoryPages(results, request) 169 170 # WikiDict functions. 171 172 def getWikiDict(pagename, request, superuser=False): 173 174 """ 175 Return the WikiDict provided by the given 'pagename' using the given 176 'request'. If the optional 'superuser' is specified as a true value, no read 177 access check will be made. 178 """ 179 180 if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)): 181 if hasattr(request.dicts, "dict"): 182 return request.dicts.dict(pagename) 183 else: 184 return request.dicts[pagename] 185 else: 186 return None 187 188 # Searching-related functions. 189 190 def getPagesFromResults(result_pages, request): 191 192 "Return genuine pages for the given 'result_pages' using the 'request'." 193 194 return [Page(request, page.page_name) for page in result_pages] 195 196 # Region/section parsing. 197 198 def getRegions(s, include_non_regions=False): 199 200 """ 201 Parse the string 's', returning a list of explicitly declared regions. 202 203 If 'include_non_regions' is specified as a true value, fragments will be 204 included for text between explicitly declared regions. 205 """ 206 207 regions = [] 208 marker = None 209 is_block = True 210 211 # Start a region for exposed text, if appropriate. 
212 213 if include_non_regions: 214 regions.append("") 215 216 for match_text in marker_regexp.split(s): 217 218 # Capture section text. 219 220 if is_block: 221 if marker or include_non_regions: 222 regions[-1] += match_text 223 224 # Handle section markers. 225 226 else: 227 228 # Close any open sections, returning to exposed text regions. 229 230 if marker: 231 232 # Add any marker to the current region, regardless of whether it 233 # successfully closes a section. 234 235 regions[-1] += match_text 236 237 if match_text.startswith("}") and len(marker) == len(match_text): 238 marker = None 239 240 # Start a region for exposed text, if appropriate. 241 242 if include_non_regions: 243 regions.append("") 244 245 # Without a current marker, start a new section. 246 247 else: 248 marker = match_text 249 regions.append("") 250 251 # Add the marker to the new region. 252 253 regions[-1] += match_text 254 255 # The match text alternates between text between markers and the markers 256 # themselves. 257 258 is_block = not is_block 259 260 return regions 261 262 def getFragmentsFromRegions(regions): 263 264 """ 265 Return fragments from the given 'regions', each having the form 266 (format, attributes, body text). 267 """ 268 269 fragments = [] 270 271 for region in regions: 272 format, attributes, body, header, close = getFragmentFromRegion(region) 273 fragments.append((format, attributes, body)) 274 275 return fragments 276 277 def getFragmentFromRegion(region): 278 279 """ 280 Return a fragment for the given 'region' having the form (format, 281 attributes, body text, header, close), where the 'header' is the original 282 declaration of the 'region' or None if no explicit region is defined, and 283 'close' is the closing marker of the 'region' or None if no explicit region 284 is defined. 
285 """ 286 287 if region.startswith("{{{"): 288 289 body = region.lstrip("{") 290 level = len(region) - len(body) 291 body = body.rstrip("}").lstrip() 292 293 # Remove any prelude and process metadata. 294 295 if body.startswith("#!"): 296 297 try: 298 declaration, body = body.split("\n", 1) 299 except ValueError: 300 declaration = body 301 body = "" 302 303 arguments = declaration[2:] 304 305 # Get any parser/format declaration. 306 307 if arguments and not arguments[0].isspace(): 308 details = arguments.split(None, 1) 309 if len(details) == 2: 310 format, arguments = details 311 else: 312 format = details[0] 313 arguments = "" 314 else: 315 format = None 316 317 # Get the attributes/arguments for the region. 318 319 attributes = parseAttributes(arguments, False) 320 321 # Add an entry for the format in the attribute dictionary. 322 323 if format and not attributes.has_key(format): 324 attributes[format] = True 325 326 return format, attributes, body, level * "{" + declaration + "\n", level * "}" 327 328 else: 329 return None, {}, body, level * "{" + "\n", level * "}" 330 331 else: 332 return None, {}, region, None, None 333 334 def getFragments(s, include_non_regions=False): 335 336 """ 337 Return fragments for the given string 's', each having the form 338 (format, arguments, body text). 339 340 If 'include_non_regions' is specified as a true value, fragments will be 341 included for text between explicitly declared regions. 342 """ 343 344 return getFragmentsFromRegions(getRegions(s, include_non_regions)) 345 346 # Heading extraction. 347 348 def getHeadings(s): 349 350 """ 351 Return tuples of the form (level, title, span) for headings found within the 352 given string 's'. The span is itself a (start, end) tuple indicating the 353 matching region of 's' for a heading declaration. 
354 """ 355 356 headings = [] 357 358 for match in heading_regexp.finditer(s): 359 headings.append( 360 (len(match.group("level")), match.group("heading"), match.span()) 361 ) 362 363 return headings 364 365 # Region/section attribute parsing. 366 367 def parseAttributes(s, escape=True): 368 369 """ 370 Parse the section attributes string 's', returning a mapping of names to 371 values. If 'escape' is set to a true value, the attributes will be suitable 372 for use with the formatter API. If 'escape' is set to a false value, the 373 attributes will have any quoting removed. 374 """ 375 376 attrs = {} 377 f = StringIO(s) 378 name = None 379 need_value = False 380 lex = shlex(f) 381 lex.wordchars += "-" 382 383 for token in lex: 384 385 # Capture the name if needed. 386 387 if name is None: 388 name = escape and wikiutil.escape(token) or strip_token(token) 389 390 # Detect either an equals sign or another name. 391 392 elif not need_value: 393 if token == "=": 394 need_value = True 395 else: 396 attrs[name.lower()] = escape and "true" or True 397 name = wikiutil.escape(token) 398 399 # Otherwise, capture a value. 400 401 else: 402 # Quoting of attributes done similarly to wikiutil.parseAttributes. 403 404 if token: 405 if escape: 406 if token[0] in ("'", '"'): 407 token = wikiutil.escape(token) 408 else: 409 token = '"%s"' % wikiutil.escape(token, 1) 410 else: 411 token = strip_token(token) 412 413 attrs[name.lower()] = token 414 name = None 415 need_value = False 416 417 # Handle any name-only attributes at the end of the collection. 418 419 if name and not need_value: 420 attrs[name.lower()] = escape and "true" or True 421 422 return attrs 423 424 def strip_token(token): 425 426 "Return the given 'token' stripped of quoting." 427 428 if token[0] in ("'", '"') and token[-1] == token[0]: 429 return token[1:-1] 430 else: 431 return token 432 433 # Macro argument parsing. 434 435 def parseMacroArguments(args): 436 437 """ 438 Interpret the arguments. 
To support commas in labels, the label argument 439 should be quoted. For example: 440 441 "label=No, thanks!" 442 """ 443 444 try: 445 parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or [] 446 except AttributeError: 447 parsed_args = args.split(",") 448 449 pairs = [] 450 for arg in parsed_args: 451 if arg: 452 pair = arg.split("=", 1) 453 if len(pair) < 2: 454 pairs.append((None, arg)) 455 else: 456 pairs.append(tuple(pair)) 457 458 return pairs 459 460 def parseDictEntry(entry, unqualified=None): 461 462 """ 463 Return the parameters specified by the given dict 'entry' string. The 464 optional 'unqualified' parameter can be used to indicate parameters that 465 need not be specified together with a keyword and can therefore be populated 466 in the given order as such unqualified parameters are encountered. 467 468 NOTE: This is similar to parseMacroArguments but employs space as a 469 NOTE: separator and attempts to assign unqualified parameters. 470 """ 471 472 parameters = {} 473 unqualified = unqualified or () 474 475 try: 476 parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or [] 477 except AttributeError: 478 parsed_args = entry.split() 479 480 for arg in parsed_args: 481 try: 482 argname, argvalue = arg.split("=", 1) 483 484 # Detect unlikely parameter names. 485 486 if not argname.isalpha(): 487 raise ValueError 488 489 parameters[argname] = argvalue 490 491 # Unqualified parameters are assumed to be one of a recognised set. 492 493 except ValueError: 494 for argname in unqualified: 495 if not parameters.has_key(argname): 496 parameters[argname] = arg 497 break 498 499 return parameters 500 501 # Request-related classes and associated functions. 502 503 class Form: 504 505 """ 506 A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x 507 environment. 
508 """ 509 510 def __init__(self, request): 511 self.request = request 512 self.form = request.values 513 514 def has_key(self, name): 515 return not not self.form.getlist(name) 516 517 def get(self, name, default=None): 518 values = self.form.getlist(name) 519 if not values: 520 return default 521 else: 522 return values 523 524 def __getitem__(self, name): 525 return self.form.getlist(name) 526 527 def __setitem__(self, name, value): 528 try: 529 self.form.setlist(name, value) 530 except TypeError: 531 self._write_enable() 532 self.form.setlist(name, value) 533 534 def __delitem__(self, name): 535 try: 536 del self.form[name] 537 except TypeError: 538 self._write_enable() 539 del self.form[name] 540 541 def _write_enable(self): 542 self.form = self.request.values = MultiDict(self.form) 543 544 def keys(self): 545 return self.form.keys() 546 547 def items(self): 548 return self.form.lists() 549 550 class ActionSupport: 551 552 """ 553 Work around disruptive MoinMoin changes in 1.9, and also provide useful 554 convenience methods. 555 """ 556 557 def get_form(self): 558 return get_form(self.request) 559 560 def _get_selected(self, value, input_value): 561 562 """ 563 Return the HTML attribute text indicating selection of an option (or 564 otherwise) if 'value' matches 'input_value'. 565 """ 566 567 return input_value is not None and value == input_value and 'selected="selected"' or '' 568 569 def _get_selected_for_list(self, value, input_values): 570 571 """ 572 Return the HTML attribute text indicating selection of an option (or 573 otherwise) if 'value' matches one of the 'input_values'. 574 """ 575 576 return value in input_values and 'selected="selected"' or '' 577 578 def get_option_list(self, value, values): 579 580 """ 581 Return a list of HTML element definitions for options describing the 582 given 'values', selecting the option with the specified 'value' if 583 present. 
584 """ 585 586 options = [] 587 for available_value in values: 588 selected = self._get_selected(available_value, value) 589 options.append('<option value="%s" %s>%s</option>' % ( 590 escattr(available_value), selected, wikiutil.escape(available_value))) 591 return options 592 593 def _get_input(self, form, name, default=None): 594 595 """ 596 Return the input from 'form' having the given 'name', returning either 597 the input converted to an integer or the given 'default' (optional, None 598 if not specified). 599 """ 600 601 value = form.get(name, [None])[0] 602 if not value: # true if 0 obtained 603 return default 604 else: 605 return int(value) 606 607 def get_form(request): 608 609 "Work around disruptive MoinMoin changes in 1.9." 610 611 if hasattr(request, "values"): 612 return Form(request) 613 else: 614 return request.form 615 616 class send_headers_cls: 617 618 """ 619 A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a 620 1.9.x environment. 621 """ 622 623 def __init__(self, request): 624 self.request = request 625 626 def __call__(self, headers): 627 for header in headers: 628 parts = header.split(":") 629 self.request.headers.add(parts[0], ":".join(parts[1:])) 630 631 def get_send_headers(request): 632 633 "Return a function that can send response headers." 634 635 if hasattr(request, "http_headers"): 636 return request.http_headers 637 elif hasattr(request, "emit_http_headers"): 638 return request.emit_http_headers 639 else: 640 return send_headers_cls(request) 641 642 def escattr(s): 643 return wikiutil.escape(s, 1) 644 645 def getPathInfo(request): 646 if hasattr(request, "getPathinfo"): 647 return request.getPathinfo() 648 else: 649 return request.path 650 651 def getHeader(request, header_name, prefix=None): 652 653 """ 654 Using the 'request', return the value of the header with the given 655 'header_name', using the optional 'prefix' to obtain protocol-specific 656 headers if necessary. 
657 658 If no value is found for the given 'header_name', None is returned. 659 """ 660 661 if hasattr(request, "getHeader"): 662 return request.getHeader(header_name) 663 elif hasattr(request, "headers"): 664 return request.headers.get(header_name) 665 elif hasattr(request, "env"): 666 return request.env.get((prefix and prefix + "_" or "") + header_name.upper()) 667 else: 668 return None 669 670 def writeHeaders(request, mimetype, metadata, status=None): 671 672 """ 673 Using the 'request', write resource headers using the given 'mimetype', 674 based on the given 'metadata'. If the optional 'status' is specified, set 675 the status header to the given value. 676 """ 677 678 send_headers = get_send_headers(request) 679 680 # Define headers. 681 682 headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)] 683 684 # Define the last modified time. 685 # NOTE: Consider using request.httpDate. 686 687 latest_timestamp = metadata.get("last-modified") 688 if latest_timestamp: 689 headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string()) 690 691 if status: 692 headers.append("Status: %s" % status) 693 694 send_headers(headers) 695 696 # Page access functions. 697 698 def getPageURL(page): 699 700 "Return the URL of the given 'page'." 701 702 request = page.request 703 return request.getQualifiedURL(page.url(request, relative=0)) 704 705 def getFormat(page): 706 707 "Get the format used on the given 'page'." 708 709 return page.pi["format"] 710 711 def getMetadata(page): 712 713 """ 714 Return a dictionary containing items describing for the given 'page' the 715 page's "created" time, "last-modified" time, "sequence" (or revision number) 716 and the "last-comment" made about the last edit. 717 """ 718 719 request = page.request 720 721 # Get the initial revision of the page. 
722 723 revisions = page.getRevList() 724 725 if not revisions: 726 return {} 727 728 event_page_initial = Page(request, page.page_name, rev=revisions[-1]) 729 730 # Get the created and last modified times. 731 732 initial_revision = getPageRevision(event_page_initial) 733 734 metadata = {} 735 metadata["created"] = initial_revision["timestamp"] 736 latest_revision = getPageRevision(page) 737 metadata["last-modified"] = latest_revision["timestamp"] 738 metadata["sequence"] = len(revisions) - 1 739 metadata["last-comment"] = latest_revision["comment"] 740 741 return metadata 742 743 def getPageRevision(page): 744 745 "Return the revision details dictionary for the given 'page'." 746 747 # From Page.edit_info... 748 749 if hasattr(page, "editlog_entry"): 750 line = page.editlog_entry() 751 else: 752 line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x 753 754 # Similar to Page.mtime_usecs behaviour... 755 756 if line: 757 timestamp = line.ed_time_usecs 758 mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x 759 comment = line.comment 760 else: 761 mtime = 0 762 comment = "" 763 764 # Give the time zone as UTC. 765 766 return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment} 767 768 # Page parsing and formatting of embedded content. 769 770 def getPageParserClass(request): 771 772 "Using 'request', return a parser class for the current page's format." 773 774 return getParserClass(request, getFormat(request.page)) 775 776 def getParserClass(request, format): 777 778 """ 779 Return a parser class using the 'request' for the given 'format', returning 780 a plain text parser if no parser can be found for the specified 'format'. 
781 """ 782 783 try: 784 return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain") 785 except wikiutil.PluginMissingError: 786 return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain") 787 788 def getFormatterClass(request, format): 789 790 """ 791 Return a formatter class using the 'request' for the given output 'format', 792 returning a plain text formatter if no formatter can be found for the 793 specified 'format'. 794 """ 795 796 try: 797 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain") 798 except wikiutil.PluginMissingError: 799 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain") 800 801 def formatText(text, request, fmt, inhibit_p=True, parser_cls=None): 802 803 """ 804 Format the given 'text' using the specified 'request' and formatter 'fmt'. 805 Suppress line anchors in the output, and fix lists by indicating that a 806 paragraph has already been started. 807 """ 808 809 if not parser_cls: 810 parser_cls = getPageParserClass(request) 811 parser = parser_cls(text, request, line_anchors=False) 812 813 old_fmt = request.formatter 814 request.formatter = fmt 815 try: 816 if isinstance(parser, text_moin_wiki.Parser): 817 return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p) 818 else: 819 return redirectedOutput(request, parser, fmt) 820 finally: 821 request.formatter = old_fmt 822 823 def redirectedOutput(request, parser, fmt, **kw): 824 825 "A fixed version of the request method of the same name." 826 827 buf = StringIO() 828 request.redirect(buf) 829 try: 830 parser.format(fmt, **kw) 831 if hasattr(fmt, "flush"): 832 buf.write(fmt.flush(True)) 833 finally: 834 request.redirect() 835 text = buf.getvalue() 836 buf.close() 837 return text 838 839 # Finding components for content types. 
def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.
    # The split retains the group contents for matched constructs; None entries
    # arise from non-participating groups and are discarded.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "/" replacement text looks like a mis-encoded non-ASCII
    # NOTE(review): separator character - confirm against the upstream source.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter where available, falling back on the
    # request's HTML formatter otherwise.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# Content storage support.

class ItemStore(ItemDirectoryStore):

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        "Initialise an item store for the given 'page'."

        # The directory names may contain "/" separators and are expanded into
        # path components beneath the page's directory.

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    def can_delete(self):

        """
        Return whether the user associated with the request can delete the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.delete(self.page.page_name)

    # High-level methods.
    # NOTE: Permission failures are silent here: append and delete become
    # NOTE: no-ops and the store appears empty to unauthorised readers.

    def append(self, item):

        "Append the given 'item' to the store."

        if not self.can_write():
            return

        ItemDirectoryStore.append(self, item)

    def __len__(self):

        "Return the number of items in the store."

        if not self.can_read():
            return 0

        return ItemDirectoryStore.__len__(self)

    def __getitem__(self, number):

        "Return the item with the given 'number'."

        if not self.can_read():
            raise IndexError, number

        return ItemDirectoryStore.__getitem__(self, number)

    def __delitem__(self, number):

        "Remove the item with the given 'number'."

        if not self.can_delete():
            return

        return ItemDirectoryStore.__delitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4