1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de>, 7 2005-2008 MoinMoin:ThomasWaldmann. 8 @license: GNU GPL (v2 or later), see COPYING.txt for details. 9 """ 10 11 from DateSupport import * 12 from MoinMoin.Page import Page 13 from MoinMoin import config, search, wikiutil 14 from StringIO import StringIO 15 from shlex import shlex 16 import re 17 import time 18 19 # Moin 1.9 request parameters. 20 21 try: 22 from MoinMoin.support.werkzeug.datastructures import MultiDict 23 except ImportError: 24 pass 25 26 __version__ = "0.2" 27 28 # Extraction of shared fragments. 29 30 marker_regexp_str = r"([{]{3,}|[}]{3,})" 31 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 32 33 # Extraction of headings. 34 35 heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE) 36 37 # Category extraction from pages. 38 39 category_regexp = None 40 41 # Simple content parsing. 42 43 verbatim_regexp = re.compile(ur'(?:' 44 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 45 ur'|' 46 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 47 ur'|' 48 ur'!(?P<verbatim3>.*?)(\s|$)?' 49 ur'|' 50 ur'`(?P<monospace>.*?)`' 51 ur'|' 52 ur'{{{(?P<preformatted>.*?)}}}' 53 ur')', re.UNICODE) 54 55 # Category discovery. 56 57 def getCategoryPattern(request): 58 global category_regexp 59 60 try: 61 return request.cfg.cache.page_category_regexact 62 except AttributeError: 63 64 # Use regular expression from MoinMoin 1.7.1 otherwise. 65 66 if category_regexp is None: 67 category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE) 68 return category_regexp 69 70 def getCategories(request): 71 72 """ 73 From the AdvancedSearch macro, return a list of category page names using 74 the given 'request'. 
75 """ 76 77 # This will return all pages with "Category" in the title. 78 79 cat_filter = getCategoryPattern(request).search 80 return request.rootpage.getPageList(filter=cat_filter) 81 82 def getCategoryMapping(category_pagenames, request): 83 84 """ 85 For the given 'category_pagenames' return a list of tuples of the form 86 (category name, category page name) using the given 'request'. 87 """ 88 89 cat_pattern = getCategoryPattern(request) 90 mapping = [] 91 for pagename in category_pagenames: 92 name = cat_pattern.match(pagename).group("key") 93 if name != "Category": 94 mapping.append((name, pagename)) 95 mapping.sort() 96 return mapping 97 98 def getCategoryPages(pagename, request): 99 100 """ 101 Return the pages associated with the given category 'pagename' using the 102 'request'. 103 """ 104 105 query = search.QueryParser().parse_query('category:%s' % pagename) 106 results = search.searchPages(request, query, "page_name") 107 return filterCategoryPages(results, request) 108 109 def filterCategoryPages(results, request): 110 111 "Filter category pages from the given 'results' using the 'request'." 112 113 cat_pattern = getCategoryPattern(request) 114 pages = [] 115 for page in results.hits: 116 if not cat_pattern.match(page.page_name): 117 pages.append(page) 118 return pages 119 120 def getAllCategoryPages(category_names, request): 121 122 """ 123 Return all pages belonging to the categories having the given 124 'category_names', using the given 'request'. 125 """ 126 127 pages = [] 128 pagenames = set() 129 130 for category_name in category_names: 131 132 # Get the pages and page names in the category. 133 134 pages_in_category = getCategoryPages(category_name, request) 135 136 # Visit each page in the category. 137 138 for page_in_category in pages_in_category: 139 pagename = page_in_category.page_name 140 141 # Only process each page once. 
142 143 if pagename in pagenames: 144 continue 145 else: 146 pagenames.add(pagename) 147 148 pages.append(page_in_category) 149 150 return pages 151 152 def getPagesForSearch(search_pattern, request): 153 154 """ 155 Return result pages for a search employing the given 'search_pattern' and 156 using the given 'request'. 157 """ 158 159 query = search.QueryParser().parse_query(search_pattern) 160 results = search.searchPages(request, query, "page_name") 161 return filterCategoryPages(results, request) 162 163 # WikiDict functions. 164 165 def getWikiDict(pagename, request): 166 167 """ 168 Return the WikiDict provided by the given 'pagename' using the given 169 'request'. 170 """ 171 172 if pagename and Page(request, pagename).exists() and request.user.may.read(pagename): 173 if hasattr(request.dicts, "dict"): 174 return request.dicts.dict(pagename) 175 else: 176 return request.dicts[pagename] 177 else: 178 return None 179 180 # Searching-related functions. 181 182 def getPagesFromResults(result_pages, request): 183 184 "Return genuine pages for the given 'result_pages' using the 'request'." 185 186 return [Page(request, page.page_name) for page in result_pages] 187 188 # Region/section parsing. 189 190 def getRegions(s, include_non_regions=False): 191 192 """ 193 Parse the string 's', returning a list of explicitly declared regions. 194 195 If 'include_non_regions' is specified as a true value, fragments will be 196 included for text between explicitly declared regions. 197 """ 198 199 regions = [] 200 marker = None 201 is_block = True 202 203 # Start a region for exposed text, if appropriate. 204 205 if include_non_regions: 206 regions.append("") 207 208 for match_text in marker_regexp.split(s): 209 210 # Capture section text. 211 212 if is_block: 213 if marker or include_non_regions: 214 regions[-1] += match_text 215 216 # Handle section markers. 217 218 else: 219 220 # Close any open sections, returning to exposed text regions. 
221 222 if marker: 223 224 # Add any marker to the current region, regardless of whether it 225 # successfully closes a section. 226 227 regions[-1] += match_text 228 229 if match_text.startswith("}") and len(marker) == len(match_text): 230 marker = None 231 232 # Start a region for exposed text, if appropriate. 233 234 if include_non_regions: 235 regions.append("") 236 237 # Without a current marker, start a new section. 238 239 else: 240 marker = match_text 241 regions.append("") 242 243 # Add the marker to the new region. 244 245 regions[-1] += match_text 246 247 # The match text alternates between text between markers and the markers 248 # themselves. 249 250 is_block = not is_block 251 252 return regions 253 254 def getFragmentsFromRegions(regions): 255 256 """ 257 Return fragments from the given 'regions', each having the form 258 (format, attributes, body text). 259 """ 260 261 fragments = [] 262 263 for region in regions: 264 format, attributes, body, header, close = getFragmentFromRegion(region) 265 fragments.append((format, attributes, body)) 266 267 return fragments 268 269 def getFragmentFromRegion(region): 270 271 """ 272 Return a fragment for the given 'region' having the form (format, 273 attributes, body text, header, close), where the 'header' is the original 274 declaration of the 'region' or None if no explicit region is defined, and 275 'close' is the closing marker of the 'region' or None if no explicit region 276 is defined. 277 """ 278 279 if region.startswith("{{{"): 280 281 body = region.lstrip("{") 282 level = len(region) - len(body) 283 body = body.rstrip("}").lstrip() 284 285 # Remove any prelude and process metadata. 286 287 if body.startswith("#!"): 288 289 try: 290 declaration, body = body.split("\n", 1) 291 except ValueError: 292 declaration = body 293 body = "" 294 295 arguments = declaration[2:] 296 297 # Get any parser/format declaration. 
298 299 if arguments and not arguments[0].isspace(): 300 details = arguments.split(None, 1) 301 if len(details) == 2: 302 format, arguments = details 303 else: 304 format = details[0] 305 arguments = "" 306 else: 307 format = None 308 309 # Get the attributes/arguments for the region. 310 311 attributes = parseAttributes(arguments, False) 312 313 # Add an entry for the format in the attribute dictionary. 314 315 if format and not attributes.has_key(format): 316 attributes[format] = True 317 318 return format, attributes, body, level * "{" + declaration + "\n", level * "}" 319 320 else: 321 return None, {}, body, level * "{" + "\n", level * "}" 322 323 else: 324 return None, {}, region, None, None 325 326 def getFragments(s, include_non_regions=False): 327 328 """ 329 Return fragments for the given string 's', each having the form 330 (format, arguments, body text). 331 332 If 'include_non_regions' is specified as a true value, fragments will be 333 included for text between explicitly declared regions. 334 """ 335 336 return getFragmentsFromRegions(getRegions(s, include_non_regions)) 337 338 # Heading extraction. 339 340 def getHeadings(s): 341 342 """ 343 Return tuples of the form (level, title, span) for headings found within the 344 given string 's'. The span is itself a (start, end) tuple indicating the 345 matching region of 's' for a heading declaration. 346 """ 347 348 headings = [] 349 350 for match in heading_regexp.finditer(s): 351 headings.append( 352 (len(match.group("level")), match.group("heading"), match.span()) 353 ) 354 355 return headings 356 357 # Region/section attribute parsing. 358 359 def parseAttributes(s, escape=True): 360 361 """ 362 Parse the section attributes string 's', returning a mapping of names to 363 values. If 'escape' is set to a true value, the attributes will be suitable 364 for use with the formatter API. If 'escape' is set to a false value, the 365 attributes will have any quoting removed. 
366 """ 367 368 attrs = {} 369 f = StringIO(s) 370 name = None 371 need_value = False 372 lex = shlex(f) 373 lex.wordchars += "-" 374 375 for token in lex: 376 377 # Capture the name if needed. 378 379 if name is None: 380 name = escape and wikiutil.escape(token) or strip_token(token) 381 382 # Detect either an equals sign or another name. 383 384 elif not need_value: 385 if token == "=": 386 need_value = True 387 else: 388 attrs[name.lower()] = escape and "true" or True 389 name = wikiutil.escape(token) 390 391 # Otherwise, capture a value. 392 393 else: 394 # Quoting of attributes done similarly to wikiutil.parseAttributes. 395 396 if token: 397 if escape: 398 if token[0] in ("'", '"'): 399 token = wikiutil.escape(token) 400 else: 401 token = '"%s"' % wikiutil.escape(token, 1) 402 else: 403 token = strip_token(token) 404 405 attrs[name.lower()] = token 406 name = None 407 need_value = False 408 409 # Handle any name-only attributes at the end of the collection. 410 411 if name and not need_value: 412 attrs[name.lower()] = escape and "true" or True 413 414 return attrs 415 416 def strip_token(token): 417 418 "Return the given 'token' stripped of quoting." 419 420 if token[0] in ("'", '"') and token[-1] == token[0]: 421 return token[1:-1] 422 else: 423 return token 424 425 # Request-related classes and associated functions. 426 427 class Form: 428 429 """ 430 A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x 431 environment. 
432 """ 433 434 def __init__(self, request): 435 self.request = request 436 self.form = request.values 437 438 def has_key(self, name): 439 return not not self.form.getlist(name) 440 441 def get(self, name, default=None): 442 values = self.form.getlist(name) 443 if not values: 444 return default 445 else: 446 return values 447 448 def __getitem__(self, name): 449 return self.form.getlist(name) 450 451 def __setitem__(self, name, value): 452 try: 453 self.form.setlist(name, value) 454 except TypeError: 455 self._write_enable() 456 self.form.setlist(name, value) 457 458 def __delitem__(self, name): 459 try: 460 del self.form[name] 461 except TypeError: 462 self._write_enable() 463 del self.form[name] 464 465 def _write_enable(self): 466 self.form = self.request.values = MultiDict(self.form) 467 468 def keys(self): 469 return self.form.keys() 470 471 def items(self): 472 return self.form.lists() 473 474 class ActionSupport: 475 476 """ 477 Work around disruptive MoinMoin changes in 1.9, and also provide useful 478 convenience methods. 479 """ 480 481 def get_form(self): 482 return get_form(self.request) 483 484 def _get_selected(self, value, input_value): 485 486 """ 487 Return the HTML attribute text indicating selection of an option (or 488 otherwise) if 'value' matches 'input_value'. 489 """ 490 491 return input_value is not None and value == input_value and 'selected="selected"' or '' 492 493 def _get_selected_for_list(self, value, input_values): 494 495 """ 496 Return the HTML attribute text indicating selection of an option (or 497 otherwise) if 'value' matches one of the 'input_values'. 498 """ 499 500 return value in input_values and 'selected="selected"' or '' 501 502 def get_option_list(self, value, values): 503 504 """ 505 Return a list of HTML element definitions for options describing the 506 given 'values', selecting the option with the specified 'value' if 507 present. 
508 """ 509 510 options = [] 511 for available_value in values: 512 selected = self._get_selected(available_value, value) 513 options.append('<option value="%s" %s>%s</option>' % ( 514 escattr(available_value), selected, wikiutil.escape(available_value))) 515 return options 516 517 def _get_input(self, form, name, default=None): 518 519 """ 520 Return the input from 'form' having the given 'name', returning either 521 the input converted to an integer or the given 'default' (optional, None 522 if not specified). 523 """ 524 525 value = form.get(name, [None])[0] 526 if not value: # true if 0 obtained 527 return default 528 else: 529 return int(value) 530 531 def get_form(request): 532 533 "Work around disruptive MoinMoin changes in 1.9." 534 535 if hasattr(request, "values"): 536 return Form(request) 537 else: 538 return request.form 539 540 class send_headers_cls: 541 542 """ 543 A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a 544 1.9.x environment. 545 """ 546 547 def __init__(self, request): 548 self.request = request 549 550 def __call__(self, headers): 551 for header in headers: 552 parts = header.split(":") 553 self.request.headers.add(parts[0], ":".join(parts[1:])) 554 555 def get_send_headers(request): 556 557 "Return a function that can send response headers." 558 559 if hasattr(request, "http_headers"): 560 return request.http_headers 561 elif hasattr(request, "emit_http_headers"): 562 return request.emit_http_headers 563 else: 564 return send_headers_cls(request) 565 566 def escattr(s): 567 return wikiutil.escape(s, 1) 568 569 def getPathInfo(request): 570 if hasattr(request, "getPathinfo"): 571 return request.getPathinfo() 572 else: 573 return request.path 574 575 def getHeader(request, header_name, prefix=None): 576 577 """ 578 Using the 'request', return the value of the header with the given 579 'header_name', using the optional 'prefix' to obtain protocol-specific 580 headers if necessary. 
581 582 If no value is found for the given 'header_name', None is returned. 583 """ 584 585 if hasattr(request, "getHeader"): 586 return request.getHeader(header_name) 587 elif hasattr(request, "headers"): 588 return request.headers.get(header_name) 589 else: 590 return request.env.get((prefix and prefix + "_" or "") + header_name.upper()) 591 592 def writeHeaders(request, mimetype, metadata, status=None): 593 594 """ 595 Using the 'request', write resource headers using the given 'mimetype', 596 based on the given 'metadata'. If the optional 'status' is specified, set 597 the status header to the given value. 598 """ 599 600 send_headers = get_send_headers(request) 601 602 # Define headers. 603 604 headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)] 605 606 # Define the last modified time. 607 # NOTE: Consider using request.httpDate. 608 609 latest_timestamp = metadata.get("last-modified") 610 if latest_timestamp: 611 headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string()) 612 613 if status: 614 headers.append("Status: %s" % status) 615 616 send_headers(headers) 617 618 # Page access functions. 619 620 def getPageURL(page): 621 622 "Return the URL of the given 'page'." 623 624 request = page.request 625 return request.getQualifiedURL(page.url(request, relative=0)) 626 627 def getFormat(page): 628 629 "Get the format used on the given 'page'." 630 631 return page.pi["format"] 632 633 def getMetadata(page): 634 635 """ 636 Return a dictionary containing items describing for the given 'page' the 637 page's "created" time, "last-modified" time, "sequence" (or revision number) 638 and the "last-comment" made about the last edit. 639 """ 640 641 request = page.request 642 643 # Get the initial revision of the page. 644 645 revisions = page.getRevList() 646 647 if not revisions: 648 return {} 649 650 event_page_initial = Page(request, page.page_name, rev=revisions[-1]) 651 652 # Get the created and last modified times. 
653 654 initial_revision = getPageRevision(event_page_initial) 655 656 metadata = {} 657 metadata["created"] = initial_revision["timestamp"] 658 latest_revision = getPageRevision(page) 659 metadata["last-modified"] = latest_revision["timestamp"] 660 metadata["sequence"] = len(revisions) - 1 661 metadata["last-comment"] = latest_revision["comment"] 662 663 return metadata 664 665 def getPageRevision(page): 666 667 "Return the revision details dictionary for the given 'page'." 668 669 # From Page.edit_info... 670 671 if hasattr(page, "editlog_entry"): 672 line = page.editlog_entry() 673 else: 674 line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x 675 676 # Similar to Page.mtime_usecs behaviour... 677 678 if line: 679 timestamp = line.ed_time_usecs 680 mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x 681 comment = line.comment 682 else: 683 mtime = 0 684 comment = "" 685 686 # Leave the time zone empty. 687 688 return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment} 689 690 # Page parsing and formatting of embedded content. 691 692 def getPageParserClass(request): 693 694 "Using 'request', return a parser class for the current page's format." 695 696 return getParserClass(request, getFormat(request.page)) 697 698 def getParserClass(request, format): 699 700 """ 701 Return a parser class using the 'request' for the given 'format', returning 702 a plain text parser if no parser can be found for the specified 'format'. 703 """ 704 705 try: 706 return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain") 707 except wikiutil.PluginMissingError: 708 return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain") 709 710 def getFormatterClass(request, format): 711 712 """ 713 Return a formatter class using the 'request' for the given output 'format', 714 returning a plain text formatter if no formatter can be found for the 715 specified 'format'. 
716 """ 717 718 try: 719 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain") 720 except wikiutil.PluginMissingError: 721 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain") 722 723 def formatText(text, request, fmt, inhibit_p=True, parser_cls=None): 724 725 """ 726 Format the given 'text' using the specified 'request' and formatter 'fmt'. 727 Suppress line anchors in the output, and fix lists by indicating that a 728 paragraph has already been started. 729 """ 730 731 if not parser_cls: 732 parser_cls = getPageParserClass(request) 733 parser = parser_cls(text, request, line_anchors=False) 734 735 old_fmt = request.formatter 736 request.formatter = fmt 737 try: 738 return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p) 739 finally: 740 request.formatter = old_fmt 741 742 def redirectedOutput(request, parser, fmt, **kw): 743 744 "A fixed version of the request method of the same name." 745 746 buf = StringIO() 747 request.redirect(buf) 748 try: 749 parser.format(fmt, **kw) 750 if hasattr(fmt, "flush"): 751 buf.write(fmt.flush(True)) 752 finally: 753 request.redirect() 754 text = buf.getvalue() 755 buf.close() 756 return text 757 758 # Textual representations. 759 760 def getSimpleWikiText(text): 761 762 """ 763 Return the plain text representation of the given 'text' which may employ 764 certain Wiki syntax features, such as those providing verbatim or monospaced 765 text. 766 """ 767 768 # NOTE: Re-implementing support for verbatim text and linking avoidance. 769 770 return "".join([s for s in verbatim_regexp.split(text) if s is not None]) 771 772 def getEncodedWikiText(text): 773 774 "Encode the given 'text' in a verbatim representation." 775 776 return "<<Verbatim(%s)>>" % text 777 778 def getPrettyTitle(title): 779 780 "Return a nicely formatted version of the given 'title'." 781 782 return title.replace("_", " ").replace("/", u" ? ") 783 784 # User interface functions. 
def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter, falling back on the request's
    # plain HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4