# -*- coding: iso-8859-1 -*-
"""
MoinMoin - SharedContent macro, based on the FeedReader macro

@copyright: 2008, 2012, 2013 by Paul Boddie <paul@boddie.org.uk>
@license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from MoinMoin.Page import Page
from MoinRemoteSupport import *
import xml.dom.pulldom

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

Dependencies = ["time"]

# Default number of feed entries shown when the macro caller does not
# supply an explicit limit.
MAX_ENTRIES = 5
ATOM_NS = "http://www.w3.org/2005/Atom"

def text(element):

    """
    Return the concatenated text of the immediate text-node children of
    'element'. Non-text children are ignored.
    """

    nodes = []
    for node in element.childNodes:
        if node.nodeType == node.TEXT_NODE:
            nodes.append(node.nodeValue)
    return "".join(nodes)

def linktext(element, feed_type):

    """
    Return the link URL held by 'element' for the given 'feed_type':
    RSS stores the URL as element text, Atom as an 'href' attribute.
    """

    if feed_type == "rss":
        return text(element)
    else:
        return element.getAttribute("href")

def execute(macro, args):

    """
    Render a bullet list of up to 'max_entries' items from the feed named
    in 'args' (a comma-separated string of the form "URL[,max_entries]"),
    followed by a paragraph linking to the feed's channel. The feed is
    fetched through the shared resource cache, honouring the
    'moin_share_max_cache_age' configuration setting (in seconds).
    """

    request = macro.request
    fmt = macro.formatter
    _ = request.getText

    # Interpret the macro arguments, tolerating a missing or malformed
    # entry limit. 'args' is None when the macro is invoked without
    # arguments.

    feed_url = None
    max_entries = MAX_ENTRIES

    args = (args or "").split(",")
    try:
        feed_url = args[0]
        max_entries = int(args[1])
    except IndexError:
        pass
    except ValueError:
        # A non-numeric limit falls back to the default.
        pass

    if not feed_url:
        return fmt.text(_("SharedContent: no feed URL was specified"))

    # Obtain the resource, using a cached version if appropriate.

    max_cache_age = int(getattr(request.cfg, "moin_share_max_cache_age", "300"))
    data = getCachedResource(request, feed_url, "MoinShare", "wiki", max_cache_age)
    if not data:
        return fmt.text(_("SharedContent: updates could not be retrieved for %s") % feed_url)

    feed = StringIO(data)

    # The cached representation starts with resource metadata; consume it
    # so that only the feed body remains for the XML parser.

    _url, _content_type, _encoding, _metadata = getCachedResourceMetadata(feed)

    try:
        # Parse each node from the feed.

        title = link = None
        channel_title = channel_link = None

        output = []
        append = output.append
        append(fmt.bullet_list(on=1))

        feed_type = None
        in_item = False
        nentries = 0

        events = xml.dom.pulldom.parse(feed)

        for event, value in events:

            if event == xml.dom.pulldom.START_ELEMENT:
                tagname = value.localName

                # Detect the feed type and items.

                if tagname == "feed" and value.namespaceURI == ATOM_NS:
                    feed_type = "atom"

                elif tagname == "rss":
                    feed_type = "rss"

                # Detect items.

                elif feed_type == "rss" and tagname == "item" or \
                    feed_type == "atom" and tagname == "entry":

                    in_item = True

                # Titles and links occur both inside items and at channel
                # level; record them against whichever context is active.

                elif tagname == "title":
                    events.expandNode(value)
                    if in_item:
                        title = value
                    else:
                        channel_title = value

                elif tagname == "link":
                    events.expandNode(value)
                    if in_item:
                        link = value
                    else:
                        channel_link = value

            elif event == xml.dom.pulldom.END_ELEMENT:
                tagname = value.localName

                if feed_type == "rss" and tagname == "item" or \
                    feed_type == "atom" and tagname == "entry":

                    in_item = False

                    # Emit title and link information for items.

                    if title and link and nentries < max_entries:
                        link_text = linktext(link, feed_type)

                        append(fmt.listitem(on=1))
                        append(fmt.url(on=1, href=link_text))
                        append(fmt.icon('www'))
                        append(fmt.text(" " + text(title)))
                        append(fmt.url(on=0))
                        append(fmt.listitem(on=0))

                        title = link = None
                        nentries += 1

        append(fmt.bullet_list(on=0))

        # Emit a trailer linking to the channel itself plus the raw feed.

        if channel_title and channel_link:
            channel_link_text = linktext(channel_link, feed_type)

            append(fmt.paragraph(on=1))
            append(fmt.url(on=1, href=channel_link_text))
            append(fmt.text(text(channel_title)))
            append(fmt.url(on=0))
            append(fmt.text(" "))
            append(fmt.url(on=1, href=feed_url))
            append(fmt.icon('rss'))
            append(fmt.url(on=0))
            append(fmt.paragraph(on=0))

    finally:
        feed.close()

    return ''.join(output)

# vim: tabstop=4 expandtab shiftwidth=4