本文整理汇总了Python中MoinMoin.wikiutil.version2timestamp函数的典型用法代码示例。如果您正苦于以下问题:Python version2timestamp函数的具体用法?Python version2timestamp怎么用?Python version2timestamp使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了version2timestamp函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: lastEditInfo
def lastEditInfo(self, request=None):
    """Return a dict describing the last edit of this page.

    @param request: optional request object; when given, the edit time is
        rendered with the requesting user's preferred date/time format.
    @rtype: dict
    @return: {'editor': <editing user>, 'time': <formatted time string>}
    """
    # version2timestamp converts the edit-log microsecond value into a
    # UNIX timestamp in *seconds*.
    time = wikiutil.version2timestamp(self.last_modified())
    if request:
        time = request.user.getFormattedDateTime(time) # Use user time format
    else:
        # BUGFIX: the timestamp is already in seconds.  The previous code
        # multiplied by 1000*1000 before calling fromtimestamp(), which
        # expects seconds, producing a bogus far-future date or an
        # OverflowError.
        time = datetime.datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')
    return {'editor': self.user, 'time': time}
开发者ID:happytk,项目名称:moin,代码行数:7,代码来源:dayone_multi.py
示例2: _addRevisionHistory
def _addRevisionHistory(self, targetNode):
    """
    This will generate a revhistory element which it will populate with
    revision nodes. Each revision has the revnumber, date and author-
    initial elements, and if a comment was supplied, the comment element.
    The date elements format depends on the users settings, so it will
    be in the same format as the revision history as viewed in the
    page info on the wiki.
    The authorinitials will be the UserName or if it was an anonymous
    edit, then it will be the hostname/ip-address.
    The revision history of included documents is NOT included at the
    moment due to technical difficulties.
    """
    _ = self.request.getText
    log = editlog.EditLog(self.request, rootpagename=self.title)
    user_cache = {}  # userid -> User object, avoids repeated user lookups
    history = tree.element(None, u"revhistory")
    # read in the complete log of this page, newest entries first
    for line in log.reverse():
        if not line.action in ("SAVE", "SAVENEW", "SAVE/REVERT", "SAVE/RENAME"):
            # Let's ignore adding of attachments
            continue
        revision = tree.element(None, u"revision")
        # Revision number (without preceding zeros)
        self._addTextElem(revision, u"revnumber", line.rev.lstrip("0"))
        # Date of revision, formatted per the requesting user's settings
        date_text = self.request.user.getFormattedDateTime(wikiutil.version2timestamp(line.ed_time_usecs))
        self._addTextElem(revision, u"date", date_text)
        # Author of revision (looked up once per userid)
        if not (line.userid in user_cache):
            user_cache[line.userid] = user.User(self.request, line.userid, auth_method="text_docbook:740")
        author = user_cache[line.userid]
        if author and author.name:
            self._addTextElem(revision, u"authorinitials", author.name)
        else:
            # anonymous edit: fall back to the hostname/ip-address
            self._addTextElem(revision, u"authorinitials", line.hostname)
        # Comment from author of revision; synthesize one for revert/rename
        comment = line.comment
        if not comment:
            if "/REVERT" in line.action:
                comment = _("Revert to revision %(rev)d.") % {"rev": int(line.extra)}
            elif "/RENAME" in line.action:
                comment = _("Renamed from '%(oldpagename)s'.") % {"oldpagename": line.extra}
        if comment:
            self._addTextElem(revision, u"revremark", comment)
        history.xml_append(revision)
    if history.xml_first_child:
        # only add the revision history if there is history to add
        targetNode.xml_append(history)
开发者ID:pombredanne,项目名称:akara,代码行数:60,代码来源:text_docbook.py
示例3: _load_group
def _load_group(self):
    """Load the group members, preferring a still-fresh wiki-scope cache.

    Falls back to the (expensive) parent-class load and refreshes the
    cache whenever the cache entry is missing, broken or stale.
    """
    req = self.request
    name = self.name
    backing_page = Page(req, name)
    if not backing_page.exists():
        raise GroupDoesNotExistError(name)
    cache = caching.CacheEntry(req, 'pagegroups',
                               wikiutil.quoteWikinameFS(name),
                               scope='wiki', use_pickle=True)
    try:
        # TODO: fix up-to-date check mtime granularity problems.
        # cache.mtime() is a float while the page mtime (converted from
        # usecs) is an integer, so compare both as int.
        if int(cache.mtime()) <= int(wikiutil.version2timestamp(backing_page.mtime_usecs())):
            # stale cache entry
            raise caching.CacheError
        return cache.content()
    except caching.CacheError:
        # either cache does not exist, is erroneous or not uptodate: recreate it
        members, member_groups = super(WikiGroup, self)._load_group()
        cache.update((members, member_groups))
        return members, member_groups
开发者ID:IvanLogvinov,项目名称:soar,代码行数:28,代码来源:wiki_groups.py
示例4: _load_dict
def _load_dict(self):
    """Return the dict contents, served from a wiki-scope cache when fresh.

    Rebuilds via the parent class and updates the cache when the entry
    is missing, erroneous or older than the backing page.
    """
    req = self.request
    name = self.name
    backing_page = Page(req, name)
    if not backing_page.exists():
        raise DictDoesNotExistError(name)
    cache = caching.CacheEntry(req, "pagedicts",
                               wikiutil.quoteWikinameFS(name),
                               scope="wiki", use_pickle=True)
    try:
        # TODO: fix up-to-date check mtime granularity problems.
        # cache.mtime() is a float while the page mtime (converted from
        # usecs) is an integer; compare on the common int type.
        fresh = int(cache.mtime()) > int(wikiutil.version2timestamp(backing_page.mtime_usecs()))
        if not fresh:
            raise caching.CacheError
        return cache.content()
    except caching.CacheError:
        # either cache does not exist, is erroneous or not uptodate: recreate it
        d = super(WikiDict, self)._load_dict()
        cache.update(d)
        return d
开发者ID:microcosmx,项目名称:experiments,代码行数:28,代码来源:wiki_dicts.py
示例5: packagePages
def packagePages(self, pagelist, filename, function):
    """ Puts pages from pagelist into filename and calls function on them on installation.

    @param pagelist: names of the pages to pack
    @param filename: path of the zip file to (re)create
    @param function: install-script command applied to each page
    """
    request = self.request
    # start from a clean slate -- an existing file would not be truncated
    try:
        os.remove(filename)
    except OSError:
        pass
    zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)
    cnt = 0
    script = [packLine(['MoinMoinPackage', '1']), ]
    for pagename in pagelist:
        pagename = pagename.strip()
        page = Page(request, pagename)
        if page.exists():
            cnt += 1
            script.append(packLine([function, str(cnt), pagename]))
            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
            # FIX: clamp pre-1980 timestamps, zipfile can not store them --
            # same guard as PackagePages.collectpackage uses to avoid
            # strange exceptions from zipfile.
            nineteeneighty = (10 * 365 + 3) * 24 * 3600 # 1970 + 10y + 3d
            timestamp = max(nineteeneighty, timestamp)
            zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
            zi.compress_type = COMPRESSION_LEVEL
            zf.writestr(zi, page.get_raw_body().encode("utf-8"))
        else:
            # missing pages are silently skipped; they just shrink the bundle
            pass
    script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]
    zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    zf.close()
开发者ID:steveyen,项目名称:moingo,代码行数:31,代码来源:mkpagepacks.py
示例6: collectpackage
def collectpackage(self, pagelist, fileobject, pkgname="", include_attachments=False):
    """ Expects a list of pages as an argument, and fileobject to be an open
    file object, which a zipfile will get written to.

    @param pagelist: pages to package
    @param fileobject: open file object to write to
    @param pkgname: optional file name, to prevent self packaging
    @param include_attachments: True if you want attachments collected
    @rtype: string or None
    @return: error message, if one happened
    """
    _ = self.request.getText
    COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
    # keep only existing pages that the requesting user may read
    pages = []
    for pagename in pagelist:
        pagename = wikiutil.normalize_pagename(pagename, self.request.cfg)
        if pagename:
            page = Page(self.request, pagename)
            if page.exists() and self.request.user.may.read(pagename):
                pages.append(page)
    if not pages:
        return (_('No pages like "%s"!') % wikiutil.escape(pagelist))
    # Set zipfile output
    zf = zipfile.ZipFile(fileobject, "w", COMPRESSION_LEVEL)
    cnt = 0
    userid = user.getUserIdentification(self.request)
    script = [packLine(['MoinMoinPackage', '1']), ]
    for page in pages:
        cnt += 1
        files = _get_files(self.request, page.page_name)
        script.append(packLine(["AddRevision", str(cnt), page.page_name, userid, "Created by the PackagePages action."]))
        timestamp = wikiutil.version2timestamp(page.mtime_usecs())
        # avoid getting strange exceptions from zipfile in case of pre-1980 timestamps
        nineteeneighty = (10 * 365 + 3) * 24 * 3600 # 1970 + 10y + 3d
        timestamp = max(nineteeneighty, timestamp) # zip can not store timestamps before 1980
        zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
        zi.compress_type = COMPRESSION_LEVEL
        zf.writestr(zi, page.get_raw_body().encode("utf-8"))
        if include_attachments:
            for attname in files:
                if attname != pkgname:
                    # do not pack the package file into itself
                    cnt += 1
                    zipname = "%d_attachment" % cnt
                    script.append(packLine(["AddAttachment", zipname, attname, page.page_name, userid, "Created by the PackagePages action."]))
                    filename = AttachFile.getFilename(self.request, page.page_name, attname)
                    zf.write(filename, zipname)
    script += [packLine(['Print', 'Thank you for using PackagePages!'])]
    zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    zf.close()
开发者ID:Kartstig,项目名称:engineering-inventions-wiki,代码行数:58,代码来源:PackagePages.py
示例7: xmlrpc_getPageInfoVersion
def xmlrpc_getPageInfoVersion(self, pagename, rev):
    """
    Return page information for specific revision

    @param pagename: the name of the page (utf-8)
    @param rev: revision to get info about (int); None means current revision
    @rtype: dict
    @return: page information
        * name (string): the canonical page name, UTF-8.
        * lastModified (date): Last modification date, UTC.
        * author (string): author name, UTF-8.
        * version (int): current version
    """
    pn = self._instr(pagename)
    # User may read this page?
    if not self.request.user.may.read(pn):
        return self.notAllowedFault()
    if rev is not None:
        page = Page(self.request, pn, rev=rev)
    else:
        page = Page(self.request, pn)
        rev = page.current_rev()
    # Non existing page?
    if not page.exists():
        return self.noSuchPageFault()
    # Get page info from the edit log
    edit_info = page.edit_info()
    if not edit_info:
        return self.noLogEntryFault()
    mtime = wikiutil.version2timestamp(long(edit_info['timestamp'])) # must be long for py 2.2.x
    gmtuple = tuple(time.gmtime(mtime))
    version = rev # our new rev numbers: 1,2,3,4,....
    #######################################################################
    # BACKWARDS COMPATIBILITY CODE - remove when 1.2.x is regarded stone age
    # as we run a feed for BadContent on MoinMaster, we want to stay
    # compatible here for a while with 1.2.x moins asking us for BadContent
    # 1.3 uses the lastModified field for checking for updates, so it
    # should be no problem putting the old UNIX timestamp style of version
    # number in the version field
    if self.request.cfg.sitename == 'MoinMaster' and pagename == 'BadContent':
        version = int(mtime)
    #######################################################################
    return {
        'name': self._outstr(page.page_name),
        'lastModified': xmlrpclib.DateTime(gmtuple),
        'author': self._outstr(edit_info['editor']),
        'version': version,
        }
开发者ID:Kartstig,项目名称:engineering-inventions-wiki,代码行数:57,代码来源:__init__.py
示例8: flush_pages
def flush_pages(pages):
    """Emit one day's worth of edits: a date heading followed by a <ul>
    of formatted per-page edit entries (appends to the enclosing
    scope's ``output`` list)."""
    # the first entry's edit time determines the day heading
    heading_date = request.user.getFormattedDate(
        wikiutil.version2timestamp(pages[0].ed_time_usecs))
    output.append(u'<h5>%s</h5><ul>' % heading_date)
    output.extend(format_page_edits(macro, entry) for entry in pages)
    output.append(u'</ul>')
开发者ID:wada314,项目名称:gswiki,代码行数:12,代码来源:BriefRecentChanges.py
示例9: logchain2
def logchain2(request, log1, log2):
    """Merge two newest-first edit-log iterators into one, newest-first.

    Uses a one-item lookahead buffer per log; entries whose edit times are
    equal (compared down to the minute) are treated as the same item and
    yielded only once.
    """
    n1_next = None  # lookahead buffer for log1 (None = needs refill)
    n2_next = None  # lookahead buffer for log2
    count = 0
    while True:
        count += 1
        # refill whichever lookahead slot is empty; None marks exhaustion
        try:
            n1 = n1_next or next(log1)
        except StopIteration:
            n1 = None
        try:
            n2 = n2_next or next(log2)
        except StopIteration:
            n2 = None
        n1_next = n1
        n2_next = n2
        if n1 is None and n2 is None:
            # both logs exhausted
            break
        elif n1 is not None and n2 is not None:
            # compare edit times to minute resolution (first 5 tuple fields)
            t1 = request.user.getTime(wikiutil.version2timestamp(n1.ed_time_usecs))[:5]
            t2 = request.user.getTime(wikiutil.version2timestamp(n2.ed_time_usecs))[:5]
            if t1 > t2:
                n1_next = None
                yield n1
            elif t1 == t2: # same item, discard one.
                n1_next = None
                n2_next = None
                yield n1
            else:
                n2_next = None
                yield n2
        elif n1 is None:
            # only log2 has entries left
            n2_next = None
            yield n2
        else:
            # only log1 has entries left
            n1_next = None
            yield n1
开发者ID:happytk,项目名称:moin,代码行数:40,代码来源:RecentChanges.py
示例10: xmlrpc_getRecentChanges
def xmlrpc_getRecentChanges(self, date):
    """
    Get RecentChanges since date

    @param date: date since when rc will be listed
    @rtype: list
    @return: a list of changed pages since date, which should be in
        UTC. The result is a list, where each element is a struct:
        * name (string) :
            Name of the page. The name is in UTF-8.
        * lastModified (date) :
            Date of last modification, in UTC.
        * author (string) :
            Name of the author (if available). UTF-8.
        * version (int) :
            Current version.
    """
    return_items = []
    edit_log = editlog.EditLog(self.request)
    # walk the edit log newest-first so we can stop at the cutoff date
    for log in edit_log.reverse():
        # get last-modified UTC (DateTime) from log
        gmtuple = tuple(time.gmtime(wikiutil.version2timestamp(log.ed_time_usecs)))
        lastModified_date = xmlrpclib.DateTime(gmtuple)
        # all remaining entries are older than "date": done
        if lastModified_date < date:
            break
        # skip if knowledge not permitted
        if not self.request.user.may.read(log.pagename):
            continue
        # get page name (str) from log
        pagename_str = self._outstr(log.pagename)
        # author: prefer the registered user name, fall back to the hostname
        author_str = log.hostname
        if log.userid:
            userdata = user.User(self.request, log.userid)
            if userdata.name:
                author_str = userdata.name
        author_str = self._outstr(author_str)
        return_item = {'name': pagename_str,
                       'lastModified': lastModified_date,
                       'author': author_str,
                       'version': int(log.rev) }
        return_items.append(return_item)
    return return_items
开发者ID:Kartstig,项目名称:engineering-inventions-wiki,代码行数:52,代码来源:__init__.py
示例11: logchain
def logchain(request, log1):
    """Merge the default wiki edit log with the history of every
    configured storage, yielding entries newest-first.

    Each source is consumed lazily through a one-slot lookahead list
    (``next_data``); entries whose page belongs to a different middleware
    than the storage that reported them are skipped.

    NOTE(review): indentation reconstructed -- the middleware-filter loop
    is assumed to run only on freshly fetched entries; confirm against the
    original RecentChanges.py.
    """
    logs = [(log1, 'wiki'), ] # default wiki recent-logs
    for name, storage in request.storage.iteritems():
        data = storage.history(request)
        logs.append((data, name))
    # one lookahead slot per log; None means "needs refill" or "exhausted"
    next_data = []
    for a in range(len(logs)):
        next_data.append(None)
    while True:
        for idx, packed in enumerate(logs):
            s, storage_name = packed
            try:
                if next_data[idx]: pass
                else:
                    next_data[idx] = next(s) # if next_data is None, get the next data using 'next(s)'
                    # skip entries stored by a different middleware
                    while True:
                        mt = get_middleware_type(request, next_data[idx].pagename)
                        if mt == storage_name:
                            break
                        else:
                            next_data[idx] = next(s)
            except StopIteration:
                next_data[idx] = None
        if not max(next_data): # all is None
            break
        # pick the latest entry among the storages (times to the minute)
        times = []
        for s in next_data:
            if s is None: times.append(0)
            else:
                times.append(request.user.getTime(wikiutil.version2timestamp(s.ed_time_usecs))[:5])
        mtime = max(times)
        idx = times.index(mtime)
        ydata = next_data[idx]
        next_data[idx] = None # invalidate; the slot is refilled next round
        yield ydata
开发者ID:happytk,项目名称:moin,代码行数:51,代码来源:RecentChanges.py
示例12: getPageListFromLog
def getPageListFromLog (request):
    """Collect recently changed page names from the global edit log.

    Walks the log newest-first, grouping entries by day, and gathers the
    pages changed on at most _MAX_DAYS recent days; each page is listed
    only once (the first/newest day it appears on).

    @param request: current request (used for ACL checks and time format)
    @rtype: list
    @return: page names readable by the current user
    """
    this_day = request.user.getTime(time.time())[0:3]
    log = editlog.EditLog(request)
    pages = {}          # pagename -> log lines of the day being collected
    pagelist = []
    ignore_pages = {}   # pages already emitted on a newer day
    day_count = 0
    for line in log.reverse():
        if not request.user.may.read(line.pagename):
            continue
        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        day = line.time_tuple[0:3]
        if ((this_day != day or (not _MAX_DAYS))) and len(pages) > 0:
            # new day or bookmark reached: flush the collected pages
            this_day = day
            for page in pages:
                ignore_pages[page] = None
            for page in pages.values():
                pagelist.append(page[0].pagename)
            pages = {}
            day_count += 1
            if _MAX_DAYS and (day_count >= _MAX_DAYS):
                break
        elif this_day != day:
            # new day but no changes
            this_day = day
        # IDIOM FIX: "in" instead of the deprecated dict.has_key()
        if line.pagename in ignore_pages:
            continue
        # end listing by default if user has a bookmark and we reached it
        if not _MAX_DAYS:
            break
        if line.pagename in pages:
            pages[line.pagename].append(line)
        else:
            pages[line.pagename] = [line]
    else:
        # loop ran to completion (no break): flush the final partial day
        if len(pages) > 0:
            for page in pages.values():
                pagelist.append(page[0].pagename)
    return pagelist
开发者ID:happytk,项目名称:moin,代码行数:49,代码来源:CustomTag.py
示例13: execute
def execute(pagename, request):
    """Emit the sisterpages list: one "url pagename" line per readable
    page (plain text), honoring HTTP conditional GET via the
    Last-Modified and Etag headers."""
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:
        # NOTE(review): bare except -- presumably covers a missing/empty
        # edit log; consider narrowing to the specific exception type.
        lastmod = 0
    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod
    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.emit_http_headers(["Status: 304 Not modified"])
            # NOTE(review): when if-none-match is present but differs,
            # this path emits no headers at all -- verify intended.
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.emit_http_headers(["Status: 304 Not modified"])
        else:
            request.emit_http_headers(["Status: 304 Not modified"])
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = timefuncs.formathttpdate(time.time() + 24 * 3600)
        httpheaders = [
            "Content-Type: text/plain; charset=UTF-8",
            "Expires: %s" % expires,
            "Last-Modified: %s" % timestamp,
            "Etag: %s" % etag,
        ]
        # send the generated XML document
        request.emit_http_headers(httpheaders)
        baseurl = request.getBaseURL()
        if not baseurl.endswith("/"):
            baseurl += "/"
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode("utf-8"))
开发者ID:steveyen,项目名称:moingo,代码行数:49,代码来源:sisterpages.py
示例14: create_package
def create_package(self, script, page=None):
    """Create an example package zip file for the tests.

    @param script: contents of the MOIN_PACKAGE_FILE install script
    @param page: optional page whose raw body is packed as member "1"
    @return: path of the created temporary zip file
    """
    userid = user.getUserIdentification(self.request)
    COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
    zip_file = tempfile.mkstemp(suffix='.zip')[1]
    zf = zipfile.ZipFile(zip_file, "w", COMPRESSION_LEVEL)
    if page:
        timestamp = wikiutil.version2timestamp(page.mtime_usecs())
        # stamp the zip member with the page's modification time
        zi = zipfile.ZipInfo(filename="1", date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
        zi.compress_type = COMPRESSION_LEVEL
        zf.writestr(zi, page.get_raw_body().encode("utf-8"))
    # NOTE(review): indentation reconstructed -- the sample attachment is
    # written unconditionally here; confirm against upstream test_packages.py
    zf.writestr("1_attachment", "sample attachment")
    zf.writestr(MOIN_PACKAGE_FILE, script.encode("utf-8"))
    zf.close()
    return zip_file
开发者ID:Glottotopia,项目名称:aagd,代码行数:15,代码来源:test_packages.py
示例15: packagePages
def packagePages(self, pagelist, filename, function):
    """ Puts pages from pagelist into filename and calls function on them on installation. """
    request = self.request
    # start from a clean slate -- a pre-existing file would not be truncated
    try:
        os.remove(filename)
    except OSError:
        pass
    # page LanguageSetup needs no packing!
    existing_pages = [
        name for name in pagelist if Page(request, name).exists() and name != "LanguageSetup"
    ]
    if not existing_pages:
        return
    zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)
    script = [packLine(["MoinMoinPackage", "1"])]
    fallback_timestamp = int(time.time())
    cnt = 0
    for name in existing_pages:
        name = name.strip()
        page = Page(request, name)
        # first the page's attachments ...
        for attname in _get_files(request, name):
            cnt += 1
            zipname = "%d" % cnt
            script.append(packLine(["ReplaceUnderlayAttachment", zipname, attname, name]))
            zf.write(AttachFile.getFilename(request, name, attname), zipname)
        # ... then the page body itself
        cnt += 1
        zipname = "%d" % cnt
        script.append(packLine([function, zipname, name]))
        # page.mtime_usecs() returns 0 for underlay pages
        timestamp = wikiutil.version2timestamp(page.mtime_usecs()) or fallback_timestamp
        stamp = datetime.fromtimestamp(timestamp)
        zi = zipfile.ZipInfo(filename=zipname, date_time=stamp.timetuple()[:6])
        zi.compress_type = COMPRESSION_LEVEL
        zf.writestr(zi, page.get_raw_body().encode("utf-8"))
    script += [packLine(["Print", "Installed MoinMaster page bundle %s." % os.path.basename(filename)])]
    zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    zf.close()
开发者ID:microcosmx,项目名称:experiments,代码行数:48,代码来源:mkpagepacks.py
示例16: _readLockFile
def _readLockFile(self):
    """Load lock info if not yet loaded.

    Initializes owner, owner_html and timestamp to "nobody holds the
    lock" defaults, then fills them from the newest entry of the lock's
    edit-log file when a lock type is set.
    """
    _ = self._
    # defaults: no lock holder known
    self.owner = None
    self.owner_html = wikiutil.escape(_("<unknown>"))
    self.timestamp = 0
    if not self.locktype:
        return
    try:
        newest = editlog.EditLog(self.request, filename=self._filename()).next()
    except StopIteration:
        # empty lock log: keep the defaults
        return
    self.owner = newest.userid or newest.addr
    self.owner_html = newest.getEditor(self.request)
    self.timestamp = wikiutil.version2timestamp(newest.ed_time_usecs)
开发者ID:mikejamesthompson,项目名称:orgsites,代码行数:17,代码来源:PageEditor.py
示例17: show_pages
def show_pages(request, pagename, editor, timestamp):
    """Write a table of pages changed by *editor* since *timestamp*
    (seconds), followed by a "Revert all!" confirmation form."""
    _ = request.getText
    # the edit log stores times in microseconds
    timestamp = int(timestamp * 1000000)
    log = editlog.EditLog(request)
    pages = {}  # pagenames already handled -- one row per page
    # mimic macro object for use of RecentChanges subfunctions
    macro = tmp()
    macro.request = request
    macro.formatter = request.html_formatter
    request.write("<table>")
    for line in log.reverse():
        if line.ed_time_usecs < timestamp:
            # entries are newest-first: everything further back is older
            break
        if not request.user.may.read(line.pagename):
            continue
        if not line.pagename in pages:
            pages[line.pagename] = 1
            # only list edits made by the given editor
            if repr(line.getInterwikiEditorData(request)) == editor:
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                request.write(RecentChanges.format_page_edits(macro, [line], timestamp))
    request.write(
        """
</table>
<p>
<form method="post" action="%(url)s">
<input type="hidden" name="action" value="Despam">
<input type="hidden" name="ticket" value="%(ticket)s">
<input type="hidden" name="editor" value="%(editor)s">
<input type="submit" name="ok" value="%(label)s">
</form>
</p>
"""
        % dict(
            url=request.href(pagename),
            ticket=wikiutil.createTicket(request),
            editor=wikiutil.url_quote(editor),
            label=_("Revert all!"),
        )
    )
开发者ID:Glottotopia,项目名称:aagd,代码行数:44,代码来源:Despam.py
示例18: execute
def execute(pagename, request):
    """Emit the sisterpages list: one "url pagename" line per readable
    page (plain text), honoring HTTP conditional GET via the
    Last-Modified and Etag headers (newer request API variant)."""
    log = editlog.EditLog(request)
    try:
        lastmod = wikiutil.version2timestamp(log.date())
    except:
        # NOTE(review): bare except -- presumably covers a missing/empty
        # edit log; consider narrowing to the specific exception type.
        lastmod = 0
    timestamp = timefuncs.formathttpdate(lastmod)
    etag = "%d" % lastmod
    # for 304, we look at if-modified-since and if-none-match headers,
    # one of them must match and the other is either not there or must match.
    if request.if_modified_since == timestamp:
        if request.if_none_match:
            if request.if_none_match == etag:
                request.status_code = 304
            # NOTE(review): when if-none-match is present but differs,
            # no status is set on this path -- verify intended.
        else:
            request.status_code = 304
    elif request.if_none_match == etag:
        if request.if_modified_since:
            if request.if_modified_since == timestamp:
                request.status_code = 304
        else:
            request.status_code = 304
    else:
        # generate an Expires header, using 1d cache lifetime of sisterpages list
        expires = time.time() + 24*3600
        request.mimetype = 'text/plain'
        request.expires = expires
        request.last_modified = timestamp
        request.headers['Etag'] = etag
        # send the generated XML document
        # Get list of user readable pages
        pages = request.rootpage.getPageList()
        pages.sort()
        for pn in pages:
            p = Page(request, pn)
            entry = u"%s %s\r\n" % (request.getQualifiedURL(p.url(request)), p.page_name)
            request.write(entry.encode('utf-8'))
开发者ID:Glottotopia,项目名称:aagd,代码行数:41,代码来源:sisterpages.py
示例19: print_abandoned
def print_abandoned(macro):
    """Render the "abandoned pages" listing: the newest edit-log entry
    of every page, grouped into day sections, limited to max_days days.

    @param macro: macro object supplying request and formatter
    @return: rendered HTML string
    """
    request = macro.request
    _ = request.getText
    output = []
    d = {}  # template values passed to the theme's recentchanges_* hooks
    page = macro.formatter.page
    pagename = page.page_name
    d["page"] = page
    d["q_page_name"] = wikiutil.quoteWikinameURL(pagename)
    msg = None
    # gather the newest edit-log entry of every page
    pages = request.rootpage.getPageList()
    last_edits = []
    for name in pages:
        log = Page(request, name).editlog_entry()
        if log:
            last_edits.append(log)
    # we don't want all Systempages at the beginning of the abandoned list
    del pages
    last_edits.sort()
    # set max size in days
    max_days = min(int(request.values.get("max_days", 0)), _DAYS_SELECTION[-1])
    # default to _MAX_DAYS for users without bookmark
    if not max_days:
        max_days = _MAX_DAYS
    d["rc_max_days"] = max_days
    # give known user the option to extend the normal display
    if request.user.valid:
        d["rc_days"] = _DAYS_SELECTION
    else:
        d["rc_days"] = None
    d["rc_update_bookmark"] = None
    output.append(request.theme.recentchanges_header(d))
    length = len(last_edits)
    index = 0       # current scan position in last_edits
    last_index = 0  # start of the day-section being accumulated
    day_count = 0
    if length > 0:
        line = last_edits[index]
        line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
        this_day = line.time_tuple[0:3]
        day = this_day
        while 1:
            index += 1
            if index > length:
                break
            if index < length:
                line = last_edits[index]
                line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                day = line.time_tuple[0:3]
            # day boundary (or end of list): emit the accumulated section
            if (day != this_day) or (index == length):
                d["bookmark_link_html"] = None
                d["date"] = request.user.getFormattedDate(wikiutil.version2timestamp(last_edits[last_index].ed_time_usecs))
                output.append(request.theme.recentchanges_daybreak(d))
                this_day = day
                for page in last_edits[last_index:index]:
                    output.append(format_page_edits(macro, [page], None))
                last_index = index
                day_count += 1
                if day_count >= max_days:
                    break
    d["rc_msg"] = msg
    output.append(request.theme.recentchanges_footer(d))
    return "".join(output)
开发者ID:graphingwiki,项目名称:gwiki-with-moin,代码行数:86,代码来源:RecentChanges.py
示例20: execute
def execute(pagename, request):
""" Send recent changes as an RSS document
"""
cfg = request.cfg
# get params
items_limit = 100
try:
max_items = int(request.values["items"])
max_items = min(max_items, items_limit) # not more than `items_limit`
except (KeyError, ValueError):
# not more than 15 items in a RSS file by default
max_items = 15
try:
unique = int(request.values.get("unique", 0))
except ValueError:
unique = 0
try:
diffs = int(request.values.get("diffs", 0))
except ValueError:
diffs = 0
## ddiffs inserted by Ralf Zosel <[email protected]>, 04.12.2003
try:
ddiffs = int(request.values.get("ddiffs", 0))
except ValueError:
ddiffs = 0
urlfilter = request.values.get("filter")
if urlfilter:
urlfilter = re.compile(urlfilter)
else:
urlfilter = None
# get data
log = editlog.EditLog(request)
logdata = []
counter = 0
pages = {}
lastmod = 0
for line in log.reverse():
if urlfilter and not (urlfilter.match(line.pagename)):
continue
if not request.user.may.read(line.pagename):
continue
if not line.action.startswith("SAVE") or ((line.pagename in pages) and unique):
continue
# if log.dayChanged() and log.daycount > _MAX_DAYS: break
line.editor = line.getInterwikiEditorData(request)
line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
logdata.append(line)
pages[line.pagename] = None
if not lastmod:
lastmod = wikiutil.version2timestamp(line.ed_time_usecs)
counter += 1
if counter >= max_items:
break
del log
timestamp = timefuncs.formathttpdate(lastmod)
etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)
# for 304, we look at if-modified-since and if-none-match headers,
# one of them must match and the other is either not there or must match.
if request.if_modified_since == timestamp:
if request.if_none_match:
if request.if_none_match == etag:
request.status_code = 304
else:
request.status_code = 304
elif request.if_none_match == etag:
if request.if_modified_since:
if request.if_modified_since == timestamp:
request.status_code = 304
else:
request.status_code = 304
else:
# generate an Expires header, using whatever setting the admin
# defined for suggested cache lifetime of the RecentChanges RSS doc
expires = time.time() + cfg.rss_cache
request.mimetype = "application/rss+xml"
request.expires = expires
request.last_modified = lastmod
request.headers["Etag"] = etag
# send the generated XML document
baseurl = request.url_root
logo = re.search(r'src="([^"]*)"', cfg.logo_string)
if logo:
logo = request.getQualifiedURL(logo.group(1))
# prepare output
output = structencoder(indent=u"yes")
FEED_HEADER_COMMENT = """
<!--
Add an "items=nnn" URL parameter to get more than the default 15 items.
#.........这里部分代码省略.........
开发者ID:pombredanne,项目名称:akara,代码行数:101,代码来源:atom_rc.py
注:本文中的MoinMoin.wikiutil.version2timestamp函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论