# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2004 by Florian Festi,
                2006 by Mikko Virkkil,
                2005-2008 MoinMoin:ThomasWaldmann,
                2007 MoinMoin:ReimarBauer.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

# NOTE(review): this module targets Python 2 (ur'' literals, StringIO,
# NOTE(review): has_key, old-style raise) and spans MoinMoin 1.5.x-1.9.x APIs.

from DateSupport import *
from ItemSupport import ItemDirectoryStore
from MoinMoin.parser import text_moin_wiki
from MoinMoin.Page import Page
from MoinMoin.util import lock
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time
import os

# Moin 1.9 request parameters.
# MultiDict is only available in a 1.9 environment; on older versions the
# import fails silently and the Form._write_enable path is never needed.

try:
    from MoinMoin.support.werkzeug.datastructures import MultiDict
except ImportError:
    pass

__version__ = "0.3"

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

# Cached fallback pattern, populated lazily by getCategoryPattern.

category_regexp = None

# Simple content parsing.

# Matches the Wiki constructs that wrap plain text: <<Verbatim(...)>>,
# [[Verbatim(...)]], !escaped words, `monospace` and {{{preformatted}}}.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):

    """
    Return the compiled regular expression used to recognise category page
    names, preferring the pattern cached on the wiki configuration of the
    given 'request' and falling back to a module-level cached default.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        # Skip the bare "Category" page itself.
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. None is returned when the page does not exist or the user may
    not read it.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        # Moin 1.9 exposes dicts via a method; earlier versions via mapping access.
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # A closing marker must use the same number of braces as the
                # marker that opened the section.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        body = region.lstrip("{")
        # The brace count gives the nesting level of the region markers.
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                # A declaration-only region has an empty body.
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False
    lex = shlex(f)
    # Treat hyphenated identifiers as single tokens.
    lex.wordchars += "-"

    for token in lex:

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                # The previous name had no value: record it as a boolean flag.
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

    "label=No, thanks!"
    """

    try:
        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:
        # Older MoinMoin versions lack parse_quoted_separated.
        parsed_args = args.split(",")

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                # A bare value with no name.
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment. Values are exposed as lists, as with the old request.form.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):
        # The underlying structure may be immutable; switch to a writable
        # copy on demand.
        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):
        # Replace the immutable request values with a mutable MultiDict copy.
        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        # Each header is a "Name: value" string; split on the first colon only.
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path info for the given 'request' across MoinMoin versions."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        # Fall back to CGI-style environment variables, e.g. HTTP_ACCEPT.
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Give the time zone as UTC.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the supplied formatter on the request.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        # NOTE(review): this tests the formatter against a parser class -
        # NOTE(review): presumably only the Wiki parser accepts inhibit_p.
        if isinstance(fmt, text_moin_wiki.Parser):
            return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
        else:
            return redirectedOutput(request, parser, fmt)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the u" ? " separator looks like a mangled non-ASCII
    # NOTE(review): character from the iso-8859-1 source - verify upstream.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Use the current page's formatter where available, falling back to the
    # request's HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# Content storage support.

class ItemStore(ItemDirectoryStore):

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        "Initialise an item store for the given 'page'."

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    # High-level methods.

    def append(self, item):

        "Append the given 'item' to the store."

        # Silently ignore writes by users without write access.

        if not self.can_write():
            return

        ItemDirectoryStore.append(self, item)

    def __len__(self):

        """
        Return the number of the next item (which should also be the number of
        items).
        """

        if not self.can_read():
            return 0

        return ItemDirectoryStore.__len__(self)

    def __getitem__(self, number):

        "Return the item with the given 'number'."

        if not self.can_read():
            raise IndexError, number

        return ItemDirectoryStore.__getitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4