# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - RequestBase Implementation

    @copyright: 2001-2003 Juergen Hermann <jh@web.de>,
                2003-2008 MoinMoin:ThomasWaldmann
    @license: GNU GPL, see COPYING for details.
"""
# Support for remote IP address detection when using (reverse) proxy (or even proxies).
# If you exactly KNOW which (reverse) proxies you can trust, put them into the list
# below, so we can determine the "outside" IP as your trusted proxies see it.

proxies_trusted = [] # trust noone!
#proxies_trusted = ['127.0.0.1', ] # can be a list of multiple IPs

from MoinMoin import log
logging = log.getLogger(__name__)
20
def find_remote_addr(addrs):
    """ Find the last remote IP address before it hits our reverse proxies.

        The LAST address in the <addrs> list is the remote IP as detected by the server
        (not taken from some x-forwarded-for header).
        The FIRST address in the <addrs> list might be the client's IP - if noone cheats
        and everyone supports x-f-f header.

        See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/

        For debug loglevel, we log all <addrs>.

        @param addrs: list of IP address strings, client first, server-detected last
        @return: remote IP address we can trust (string)

        TODO: refactor request code to first do some basic IP init, then load configuration,
        TODO: then do proxy processing.
        TODO: add wikiconfig configurability for proxies_trusted
        TODO: later, make it possible to put multipe remote IP addrs into edit-log
    """
    logging.debug("request.find_remote_addr: addrs == %r" % addrs)
    if proxies_trusted:
        # NOTE(review): this guard and the inner "if result" were reconstructed -
        # the original lines are missing in this copy; verify against upstream.
        result = [addr for addr in addrs if addr not in proxies_trusted]
        if result:
            return result[-1] # last IP before it hit our trusted (reverse) proxies
    return addrs[-1] # this is a safe remote_addr, not taken from x-f-f header
44
import os, re, time, sys, cgi, StringIO
import Cookie    # required by parse_cookie() below
import urlparse  # required by fixURI() / getQualifiedURL() below

from MoinMoin.Page import Page
from MoinMoin import config, wikiutil, user, caching, error
from MoinMoin.config import multiconfig
from MoinMoin.support.python_compatibility import set
from MoinMoin.util import IsWin9x
from MoinMoin.util.clock import Clock
from MoinMoin import auth
from urllib import quote, quote_plus
57
# umask setting --------------------------------------------------------
58
def set_umask(new_mask=0777^config.umask):
59
""" Set the OS umask value (and ignore potential failures on OSes where
60
this is not supported).
61
Default: the bitwise inverted value of config.umask
64
old_mask = os.umask(new_mask)
66
# maybe we are on win32?
69
# We do this at least once per Python process, when request is imported.
70
# If other software parts (like twistd's daemonize() function) set an
71
# unwanted umask, we have to call this again to set the correct one:
74
# Exceptions -----------------------------------------------------------

class MoinMoinFinish(Exception):
    """ Raised to jump directly to end of run() function, where finish is called """
80
class HeadersAlreadySentException(Exception):
    """ Is raised if the headers were already sent when emit_http_headers is called."""
84
class RemoteClosedConnection(Exception):
    """ Remote end closed connection during request """
89
def cgiMetaVariable(header, scheme='http'):
    """ Return CGI meta variable for header name

    e.g 'User-Agent' -> 'HTTP_USER_AGENT'
    See http://www.faqs.org/rfcs/rfc3875.html section 4.1.18

    @param header: http header name, e.g. 'User-Agent'
    @param scheme: meta variable prefix, 'http' for request headers
    @return: upper-cased meta variable name with '-' replaced by '_'
    """
    var = '%s_%s' % (scheme, header)
    return var.upper().replace('-', '_')
99
# Request Base ----------------------------------------------------------

class RequestBase(object):
    """ A collection for all data associated with ONE request. """

    # Defaults (used by sub classes)
    http_accept_language = 'en'
    server_name = 'localhost'
    # NOTE(review): server_port default reconstructed (setHost() compares
    # self.server_port against '80'/'443') - verify against upstream.
    server_port = '80'

    # Extra headers we support. Both standalone and twisted store
    # headers as lowercase.
    moin_location = 'x-moin-location'
    proxy_host = 'x-forwarded-host' # original host: header as seen by the proxy (e.g. wiki.example.org)
    proxy_xff = 'x-forwarded-for' # list of original remote_addrs as seen by the proxies (e.g. <clientip>,<proxy1>,<proxy2>,...)
115
def __init__(self, properties={}):
117
# twistd's daemonize() overrides our umask, so we reset it here every
118
# request. we do it for all request types to avoid similar problems.
123
self._auth_redirected = False
125
# Decode values collected by sub classes
126
self.path_info = self.decodePagename(self.path_info)
129
self._available_actions = None
130
self._known_actions = None
132
# Pages meta data that we collect in one request
135
self.sent_headers = None
136
self.user_headers = []
137
self.cacheable = 0 # may this output get cached by http proxies/caches?
138
self.http_caching_disabled = 0 # see disableHttpCaching()
142
# session handling. users cannot rely on a session being
143
# created, but we should always set request.session
146
# setuid handling requires an attribute in the request
147
# that stores the real user
148
self._setuid_real_user = None
150
# Check for dumb proxy requests
151
# TODO relying on request_uri will not work on all servers, especially
152
# not on external non-Apache servers
153
self.forbidden = False
154
if self.request_uri.startswith('http://'):
155
self.makeForbidden403()
161
self.clock.start('total')
162
self.clock.start('base__init__')
163
# order is important here!
164
self.__dict__.update(properties)
166
self._load_multi_cfg()
167
except error.NoConfigMatchedError:
168
self.makeForbidden(404, 'No wiki configuration matching the URL found!\r\n')
171
self.isSpiderAgent = self.check_spider()
173
# Set decode charsets. Input from the user is always in
174
# config.charset, which is the page charsets. Except
175
# path_info, which may use utf-8, and handled by decodePagename.
176
self.decode_charsets = [config.charset]
178
if self.query_string.startswith('action=xmlrpc'):
181
self.action = 'xmlrpc'
185
self.args = self.form = self.setup_args()
187
self.makeForbidden(403, "The input you sent could not be understood.")
189
self.action = self.form.get('action', ['show'])[0]
191
self.rev = int(self.form['rev'][0])
195
from MoinMoin.Page import RootPage
196
self.rootpage = RootPage(self)
198
from MoinMoin.logfile import editlog
199
self.editlog = editlog.EditLog(self)
201
from MoinMoin import i18n
205
# authentication might require translated forms, so
206
# have a try at guessing the language from the browser
207
lang = i18n.requestLanguage(self, try_user=False)
208
self.getText = lambda text, i18n=self.i18n, request=self, lang=lang, **kw: i18n.getText(text, request, lang, **kw)
210
# session handler start, auth
212
user_obj = self.cfg.session_handler.start(self, self.cfg.session_id_handler)
213
shfinisher = lambda request: self.cfg.session_handler.finish(request, request.user,
214
self.cfg.session_id_handler)
215
self.add_finisher(shfinisher)
216
# set self.user even if _handle_auth_form raises an Exception
218
self.user = self._handle_auth_form(user_obj)
220
self.cfg.session_handler.after_auth(self, self.cfg.session_id_handler, self.user)
222
self.user = user.User(self, auth_method='request:invalid')
224
# setuid handling, check isSuperUser() because the user
225
# might have lost the permission between requests
226
if 'setuid' in self.session and self.user.isSuperUser():
227
self._setuid_real_user = self.user
228
uid = self.session['setuid']
229
self.user = user.User(self, uid, auth_method='setuid')
230
# set valid to True so superusers can even switch
231
# to disable accounts
232
self.user.valid = True
234
if self.action != 'xmlrpc':
235
if not self.forbidden and self.isForbidden():
236
self.makeForbidden403()
237
if not self.forbidden and self.surge_protect():
238
self.makeUnavailable503()
241
self.mode_getpagelinks = 0 # is > 0 as long as we are in a getPageLinks call
242
self.parsePageLinks_running = {} # avoid infinite recursion by remembering what we are already running
244
self.lang = i18n.requestLanguage(self)
245
# Language for content. Page content should use the wiki default lang,
246
# but generated content like search results should use the user language.
247
self.content_lang = self.cfg.language_default
248
self.getText = lambda text, i18n=self.i18n, request=self, lang=self.lang, **kv: i18n.getText(text, request, lang, **kv)
252
from MoinMoin.formatter.text_html import Formatter
253
self.html_formatter = Formatter(self)
254
self.formatter = self.html_formatter
256
self.clock.stop('base__init__')
258
def surge_protect(self, kick_him=False):
259
""" check if someone requesting too much from us,
260
if kick_him is True, we unconditionally blacklist the current user/ip
262
limits = self.cfg.surge_action_limits
266
validuser = self.user.valid
267
current_id = validuser and self.user.name or self.remote_addr
268
if not validuser and current_id.startswith('127.'): # localnet
270
current_action = self.action
272
default_limit = self.cfg.surge_action_limits.get('default', (30, 60))
274
now = int(time.time())
276
surge_detected = False
279
# if we have common farm users, we could also use scope='farm':
280
cache = caching.CacheEntry(self, 'surgeprotect', 'surge-log', scope='wiki', use_encode=True)
282
data = cache.content()
283
data = data.split("\n")
286
id, t, action, surge_indicator = line.split("\t")
288
maxnum, dt = limits.get(action, default_limit)
290
events = surgedict.setdefault(id, {})
291
timestamps = events.setdefault(action, [])
292
timestamps.append((t, surge_indicator))
293
except StandardError:
296
maxnum, dt = limits.get(current_action, default_limit)
297
events = surgedict.setdefault(current_id, {})
298
timestamps = events.setdefault(current_action, [])
299
surge_detected = len(timestamps) > maxnum
301
surge_indicator = surge_detected and "!" or ""
302
timestamps.append((now, surge_indicator))
304
if len(timestamps) < maxnum * 2:
305
timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out
307
if current_action != 'AttachFile': # don't add AttachFile accesses to all or picture galleries will trigger SP
308
current_action = 'all' # put a total limit on user's requests
309
maxnum, dt = limits.get(current_action, default_limit)
310
events = surgedict.setdefault(current_id, {})
311
timestamps = events.setdefault(current_action, [])
313
if kick_him: # ban this guy, NOW
314
timestamps.extend([(now + self.cfg.surge_lockout_time, "!")] * (2 * maxnum))
316
surge_detected = surge_detected or len(timestamps) > maxnum
318
surge_indicator = surge_detected and "!" or ""
319
timestamps.append((now, surge_indicator))
321
if len(timestamps) < maxnum * 2:
322
timestamps.append((now + self.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out
325
for id, events in surgedict.items():
326
for action, timestamps in events.items():
327
for t, surge_indicator in timestamps:
328
data.append("%s\t%d\t%s\t%s" % (id, t, action, surge_indicator))
329
data = "\n".join(data)
331
except StandardError:
334
if surge_detected and validuser and self.user.auth_method in self.cfg.auth_methods_trusted:
335
logging.info("Trusted user %s would have triggered surge protection if not trusted." % self.user.name)
336
return False # do not subject trusted users to surge protection
338
return surge_detected
341
""" Lazy initialize the dicts on the first access """
342
if self._dicts is None:
343
from MoinMoin import wikidicts
344
dicts = wikidicts.GroupDict(self)
350
""" Delete the dicts, used by some tests """
354
dicts = property(getDicts, None, delDicts)
356
def _load_multi_cfg(self):
    """ Load the wiki configuration matching self.url.

    Idempotent: does nothing if self.cfg was already loaded.
    """
    # protect against calling multiple times
    if not hasattr(self, 'cfg'):
        self.clock.start('load_multi_cfg')
        self.cfg = multiconfig.getConfig(self.url)
        self.clock.stop('load_multi_cfg')
363
def setAcceptedCharsets(self, accept_charset):
    """ Set accepted_charsets by parsing accept-charset header

    Set self.accepted_charsets to an ordered list based on http_accept_charset.

    Reference: http://www.w3.org/Protocols/rfc2616/rfc2616.txt

    TODO: currently no code use this value.

    @param accept_charset: accept-charset header value (string or empty)
    """
    charsets = []
    if accept_charset:
        accept_charset = accept_charset.lower()
        # Add iso-8859-1 if needed
        if (not '*' in accept_charset and
            'iso-8859-1' not in accept_charset):
            accept_charset += ',iso-8859-1'

        # Make a list, sorted by quality value, using Schwartzian Transform
        # Create list of tuples (value, name) , sort, extract names
        for item in accept_charset.split(','):
            if ';' in item:
                name, qval = item.split(';')
                # qval is inverted so ascending sort == descending quality
                qval = 1.0 - float(qval.split('=')[1])
            else:
                # NOTE(review): fallback for items without ';q=' reconstructed -
                # missing lines in this copy; verify against upstream.
                name, qval = item, 0
            charsets.append((qval, name))
        charsets.sort()

        # Remove *, its not clear what we should do with it later
        charsets = [name for qval, name in charsets if name != '*']

    self.accepted_charsets = charsets
397
def _setup_vars_from_std_env(self, env):
    """ Set common request variables from CGI environment

    Parse a standard CGI environment as created by common web servers.
    Reference: http://www.faqs.org/rfcs/rfc3875.html

    @param env: dict like object containing cgi meta variables
    """
    # Values we can just copy
    self.http_accept_language = env.get('HTTP_ACCEPT_LANGUAGE', self.http_accept_language)
    self.server_name = env.get('SERVER_NAME', self.server_name)
    self.server_port = env.get('SERVER_PORT', self.server_port)
    self.saved_cookie = env.get('HTTP_COOKIE', '')
    self.script_name = env.get('SCRIPT_NAME', '')
    self.path_info = env.get('PATH_INFO', '')
    self.query_string = env.get('QUERY_STRING', '')
    self.request_method = env.get('REQUEST_METHOD', None)
    self.remote_addr = env.get('REMOTE_ADDR', '')
    self.http_user_agent = env.get('HTTP_USER_AGENT', '')
    try:
        # CONTENT_LENGTH may be absent (None) or malformed
        self.content_length = int(env.get('CONTENT_LENGTH'))
    except (TypeError, ValueError):
        self.content_length = None
    self.if_modified_since = env.get('If-modified-since') or env.get(cgiMetaVariable('If-modified-since'))
    self.if_none_match = env.get('If-none-match') or env.get(cgiMetaVariable('If-none-match'))

    # REQUEST_URI is not part of CGI spec, but an addition of Apache.
    self.request_uri = env.get('REQUEST_URI', '')

    # Values that need more work
    self.setHttpReferer(env.get('HTTP_REFERER'))
    # NOTE(review): setIsSSL/fixURI/setURL calls reconstructed from missing
    # lines (setHost needs is_ssl first) - verify order against upstream.
    self.setIsSSL(env)
    self.setHost(env.get('HTTP_HOST'))
    self.fixURI(env)
    self.setURL(env)

    #self.debugEnvironment(env)
436
def setHttpReferer(self, referer):
    """ Set http_referer, making sure its ascii

    IE might send non-ascii value.

    @param referer: HTTP_REFERER header value or None
    """
    # NOTE(review): empty default and guard reconstructed - missing lines.
    value = ''
    if referer:
        value = unicode(referer, 'ascii', 'replace')
        value = value.encode('ascii', 'replace')
    self.http_referer = value
447
def setIsSSL(self, env):
    """ Set self.is_ssl - True when the request came in over SSL/TLS.

    @param env: dict like object containing cgi meta variables
    """
    self.is_ssl = bool(env.get('SSL_PROTOCOL') or
                       env.get('SSL_PROTOCOL_VERSION') or
                       env.get('HTTPS') == 'on')
456
def setHost(self, host=None):
    """ Set http_host.

    Create from server name and port if missing. Previous code
    default to localhost.

    @param host: HTTP_HOST header value, or None to synthesize one
    """
    # NOTE(review): "if not host" guard and port default reconstructed -
    # missing lines in this copy; verify against upstream.
    if not host:
        port = ''
        standardPort = ('80', '443')[self.is_ssl]
        if self.server_port != standardPort:
            port = ':' + self.server_port
        host = self.server_name + port
    self.http_host = host
470
def fixURI(self, env):
    """ Fix problems with script_name and path_info

    Handle the strange charset semantics on Windows and other non
    posix systems. path_info is transformed into the system code
    page by the web server. Additionally, paths containing dots let
    most webservers choke.

    Broken environment variables in different environments:
             path_info script_name
    Apache1      X          X      PI does not contain dots
    Apache2      X          X      PI is not encoded correctly
    IIS          X          X      path_info include script_name
    Other        ?          -      ? := Possible and even RFC-compatible.

    @param env: dict like object containing cgi meta variables
    """
    # Fix the script_name when using Apache on Windows.
    server_software = env.get('SERVER_SOFTWARE', '')
    if os.name == 'nt' and 'Apache/' in server_software:
        # Removes elements ending in '.' from the path.
        self.script_name = '/'.join([x for x in self.script_name.split('/')
                                     if not x.endswith('.')])

    if os.name != 'posix' and self.request_uri != '':
        # Try to recreate path_info from request_uri.
        scriptAndPath = urlparse.urlparse(self.request_uri)[2]
        path = scriptAndPath.replace(self.script_name, '', 1)
        self.path_info = wikiutil.url_unquote(path, want_unicode=False)
    elif os.name == 'nt':
        # Recode path_info to utf-8
        path = wikiutil.decodeWindowsPath(self.path_info)
        self.path_info = path.encode("utf-8")

    # Fix bug in IIS/4.0 when path_info contain script_name
    if self.path_info.startswith(self.script_name):
        self.path_info = self.path_info[len(self.script_name):]
511
def setURL(self, env):
    """ Set url, used to locate wiki config

    This is the place to manipulate url parts as needed.

    @param env: dict like object containing cgi meta variables or http headers.
    """
    # proxy support
    self.rewriteRemoteAddr(env)
    self.rewriteHost(env)

    # NOTE(review): rewriteURI call reconstructed - line missing in this copy.
    self.rewriteURI(env)

    if not self.request_uri:
        self.request_uri = self.makeURI()
    self.url = self.http_host + self.request_uri
528
def rewriteHost(self, env):
    """ Rewrite http_host transparently

    Get the proxy host using 'X-Forwarded-Host' header, added by
    Apache 2 and other proxy software.

    TODO: Will not work for Apache 1 or others that don't add this header.

    TODO: If we want to add an option to disable this feature it
    should be in the server script, because the config is not
    loaded at this point, and must be loaded after url is set.

    @param env: dict like object containing cgi meta variables or http headers.
    """
    proxy_host = (env.get(self.proxy_host) or
                  env.get(cgiMetaVariable(self.proxy_host)))
    if proxy_host:
        # NOTE(review): guard reconstructed - line missing in this copy.
        self.http_host = proxy_host
547
def rewriteRemoteAddr(self, env):
    """ Rewrite remote_addr transparently

    Get the proxy remote addr using 'X-Forwarded-For' header, added by
    Apache 2 and other proxy software.

    TODO: Will not work for Apache 1 or others that don't add this header.

    TODO: If we want to add an option to disable this feature it
    should be in the server script, because the config is not
    loaded at this point, and must be loaded after url is set.

    @param env: dict like object containing cgi meta variables or http headers.
    """
    xff = (env.get(self.proxy_xff) or
           env.get(cgiMetaVariable(self.proxy_xff)))
    if xff:
        # NOTE(review): guard reconstructed - line missing in this copy.
        xff = [addr.strip() for addr in xff.split(',')]
        xff.append(self.remote_addr)
        self.remote_addr = find_remote_addr(xff)
568
def rewriteURI(self, env):
    """ Rewrite request_uri, script_name and path_info transparently

    Useful when running mod python or when running behind a proxy,
    e.g run on localhost:8000/ and serve as example.com/wiki/.

    Uses private 'X-Moin-Location' header to set the script name.
    This allow setting the script name when using Apache 2
    <location> directive::

        RequestHeader set X-Moin-Location /my/wiki/

    TODO: does not work for Apache 1 and others that do not allow
    setting custom headers per request.

    @param env: dict like object containing cgi meta variables or http headers.
    """
    location = (env.get(self.moin_location) or
                env.get(cgiMetaVariable(self.moin_location)))
    if location is None:
        # NOTE(review): early return reconstructed - lines missing in this copy.
        return

    scriptAndPath = self.script_name + self.path_info
    location = location.rstrip('/')
    self.script_name = location

    # This may happen when using mod_python
    if scriptAndPath.startswith(location):
        self.path_info = scriptAndPath[len(location):]

    # Recreate the URI from the modified parts
    if self.request_uri:
        self.request_uri = self.makeURI()
def makeURI(self):
    """ Return uri created from uri parts """
    # NOTE(review): def line and return reconstructed - missing in this copy.
    uri = self.script_name + wikiutil.url_quote(self.path_info)
    if self.query_string:
        uri += '?' + self.query_string
    return uri
611
def splitURI(self, uri):
    """ Return path and query splited from uri

    Just like CGI environment, the path is unquoted, the query is not.

    @param uri: request uri, e.g. '/path/to/page?action=show'
    @return: (unquoted path, raw query string) tuple
    """
    # NOTE(review): no-'?' branch reconstructed - missing lines in this copy.
    if '?' in uri:
        path, query = uri.split('?', 1)
    else:
        path, query = uri, ''
    return wikiutil.url_unquote(path, want_unicode=False), query
622
def _handle_auth_form(self, user_obj):
623
username = self.form.get('name', [None])[0]
624
password = self.form.get('password', [None])[0]
625
oid = self.form.get('openid_identifier', [None])[0]
626
login = 'login' in self.form
627
logout = 'logout' in self.form
628
stage = self.form.get('stage', [None])[0]
629
return self.handle_auth(user_obj, attended=True, username=username,
630
password=password, login=login, logout=logout,
631
stage=stage, openid_identifier=oid)
633
def handle_auth(self, user_obj, attended=False, **kw):
634
username = kw.get('username')
635
password = kw.get('password')
636
oid = kw.get('openid_identifier')
637
login = kw.get('login')
638
logout = kw.get('logout')
639
stage = kw.get('stage')
641
'cookie': self.cookie,
644
extra['attended'] = attended
645
extra['username'] = username
646
extra['password'] = password
647
extra['openid_identifier'] = oid
649
extra['multistage'] = True
651
self._login_multistage = None
653
if logout and 'setuid' in self.session:
654
del self.session['setuid']
657
for authmethod in self.cfg.auth:
659
user_obj, cont = authmethod.logout(self, user_obj, **extra)
661
if stage and authmethod.name != stage:
663
ret = authmethod.login(self, user_obj, **extra)
664
user_obj = ret.user_obj
665
cont = ret.continue_flag
668
del extra['multistage']
670
self._login_multistage = ret.multistage
671
self._login_multistage_name = authmethod.name
674
nextstage = auth.get_multistage_continuation_url(self, authmethod.name)
675
url = ret.redirect_to
676
url = url.replace('%return_form', quote_plus(nextstage))
677
url = url.replace('%return', quote(nextstage))
678
self._auth_redirected = True
679
self.http_redirect(url)
682
if msg and not msg in login_msgs:
683
login_msgs.append(msg)
685
user_obj, cont = authmethod.request(self, user_obj, **extra)
689
self._login_messages = login_msgs
692
def handle_jid_auth(self, jid):
    """ Return the user object for the given Jabber ID. """
    return user.get_by_jabber_id(self, jid)
695
def parse_cookie(self):
    """ Parse self.saved_cookie into self.cookie (a SimpleCookie). """
    try:
        self.cookie = Cookie.SimpleCookie(self.saved_cookie)
    except Cookie.CookieError:
        # NOTE(review): fallback reconstructed - the except body is missing in
        # this copy; assuming malformed cookies leave self.cookie as None.
        self.cookie = None
702
def reset(self):
    """ Reset request state.

    Called after saving a page, before serving the updated
    page. Solves some practical problems with request state
    modified during saving.
    """
    # NOTE(review): def line reconstructed - missing in this copy.
    # This is the content language and has nothing to do with
    # The user interface language. The content language can change
    # during the rendering of a page by lang macros
    self.current_lang = self.cfg.language_default

    self.init_unique_ids()

    if hasattr(self, "_fmt_hd_counters"):
        del self._fmt_hd_counters
720
def loadTheme(self, theme_name):
    """ Load the Theme to use for this request.

    @param theme_name: the name of the theme
    @type theme_name: str
    @return: success code
             0 on success,
             1 if user theme could not be loaded,
             2 if a hard fallback to modern theme was required.
    """
    # NOTE(review): fallback bookkeeping reconstructed - several lines are
    # missing in this copy; verify the return codes against upstream.
    fallback = 0
    if theme_name == "<default>":
        theme_name = self.cfg.theme_default

    try:
        Theme = wikiutil.importPlugin(self.cfg, 'theme', theme_name, 'Theme')
    except wikiutil.PluginMissingError:
        fallback = 1
        try:
            Theme = wikiutil.importPlugin(self.cfg, 'theme', self.cfg.theme_default, 'Theme')
        except wikiutil.PluginMissingError:
            fallback = 2
            from MoinMoin.theme.modern import Theme

    self.theme = Theme(self)
    return fallback
748
def setContentLanguage(self, lang):
    """ Set the content language, used for the content div

    Actions that generate content in the user language, like search,
    should set the content direction to the user language before they
    render it.

    @param lang: language code, e.g. 'en'
    """
    self.content_lang = lang
    self.current_lang = lang
758
def getPragma(self, key, defval=None):
    """ Query a pragma value (#pragma processing instruction)

    Keys are not case-sensitive.

    @param key: pragma name
    @param defval: value returned when the pragma is not set
    @return: pragma value or defval
    """
    return self.pragma.get(key.lower(), defval)
765
def setPragma(self, key, value):
    """ Set a pragma value (#pragma processing instruction)

    Keys are not case-sensitive.

    @param key: pragma name
    @param value: pragma value
    """
    self.pragma[key.lower()] = value
772
def getPathinfo(self):
    """ Return the remaining part of the URL. """
    return self.path_info
776
def getScriptname(self):
    """ Return the scriptname part of the URL ('/path/to/my.cgi'). """
    # NOTE(review): the "bare root" special case was reconstructed -
    # the line returning '' for '/' is missing in this copy.
    if self.script_name == '/':
        return ''
    return self.script_name
782
def getKnownActions(self):
    """ Create a set of available actions

    Return cached version if available.

    @return: set of all known action names
    """
    try:
        self.cfg.cache.known_actions # check
    except AttributeError:
        # first access: compute and cache on the wiki config
        from MoinMoin import action
        self.cfg.cache.known_actions = set(action.getNames(self.cfg))

    # Return a copy, so clients will not change the set.
    return self.cfg.cache.known_actions.copy()
799
def getAvailableActions(self, page):
    """ Get list of available actions for this request

    The result does not contain actions that start with lower case.
    Themes use this to display the actions to the user.

    @param page: current page, Page object
    @return: set of available action names (copy)
    """
    if self._available_actions is None:
        # some actions might make sense for non-existing pages, so we just
        # require read access here. Can be later refined to some action
        # specific check:
        if not self.user.may.read(page.page_name):
            # NOTE(review): early-out reconstructed - lines missing in this copy.
            return []

        # Filter non ui actions (starts with lower case letter)
        actions = self.getKnownActions()
        actions = [action for action in actions if not action[0].islower()]

        # Filter wiki excluded actions
        actions = [action for action in actions if not action in self.cfg.actions_excluded]

        # Filter actions by page type, acl and user state
        excluded = []
        if ((page.isUnderlayPage() and not page.isStandardPage()) or
            not self.user.may.write(page.page_name) or
            not self.user.may.delete(page.page_name)):
            # Prevent modification of underlay only pages, or pages
            # the user can't write and can't delete
            excluded = [u'RenamePage', u'DeletePage', ] # AttachFile must NOT be here!
        actions = [action for action in actions if not action in excluded]

        self._available_actions = set(actions)

    # Return a copy, so clients will not change the dict.
    return self._available_actions.copy()
838
def redirectedOutput(self, function, *args, **kw):
    """ Redirect output during function, return redirected output """
    buf = StringIO.StringIO()
    self.redirect(buf)
    try:
        function(*args, **kw)
    finally:
        # NOTE(review): try/finally reconstructed - missing lines in this
        # copy; output redirection must always be restored.
        self.redirect()
    text = buf.getvalue()
    return text
850
def redirect(self, file=None):
    """ Redirect output to file, or restore saved output """
    # NOTE(review): if/else structure reconstructed - missing lines.
    if file:
        self.writestack.append(self.write)
        self.write = file.write
    else:
        self.write = self.writestack.pop()
859
def log(self, msg):
    """ DEPRECATED - Log msg to logging framework
        Please call logging.info(...) directly!
    """
    # NOTE(review): def line and logging call reconstructed - missing lines.
    # Encode unicode msg
    if isinstance(msg, unicode):
        msg = msg.encode(config.charset)
    logging.info(msg)
868
def timing_log(self, start, action):
869
""" Log to timing log (for performance analysis) """
874
self.clock.stop('total') # make sure it is stopped
875
total_secs = self.clock.timings['total']
876
# we add some stuff that is easy to grep when searching for peformance problems:
879
elif total_secs > 20:
881
elif total_secs > 10:
885
total = self.clock.value('total')
886
# use + for existing pages, - for non-existing pages
887
if self.page is not None:
888
indicator += self.page.exists() and '+' or '-'
889
if self.isSpiderAgent:
893
msg = 'Timing %5d %-6s %4s %-10s %s\n' % (pid, total, indicator, action, self.url)
896
def send_file(self, fileobj, bufsize=8192, do_flush=False):
    """ Send a file to the output stream.

    @param fileobj: a file-like object (supporting read, close)
    @param bufsize: size of chunks to read/write
    @param do_flush: call flush after writing?
    """
    # NOTE(review): loop body reconstructed - lines missing in this copy.
    while True:
        buf = fileobj.read(bufsize)
        if not buf:
            break
        self.write(buf)
        if do_flush:
            self.flush()
911
def write(self, *data):
    """ Write to output stream. Must be implemented by sub classes. """
    raise NotImplementedError
915
def encode(self, data):
    """ encode data (can be both unicode strings and strings),
        preparing for a single write()

    @param data: sequence of (unicode) strings
    @return: single byte string in config.charset
    """
    # NOTE(review): loop/accumulator reconstructed - several lines are
    # missing in this copy; verify against upstream.
    wd = []
    for d in data:
        try:
            if isinstance(d, unicode):
                # if we are REALLY sure, we can use "strict"
                d = d.encode(config.charset, 'replace')
        except UnicodeError:
            logging.error("Unicode error on: %s" % repr(d))
        wd.append(d)
    return ''.join(wd)
932
def decodePagename(self, name):
    """ Decode path, possibly using non ascii characters

    Does not change the name, only decode to Unicode.

    First split the path to pages, then decode each one. This enables
    us to decode one page using config.charset and another using
    utf-8. This situation happens when you try to add to a name of
    an existing page.

    See http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1

    @param name: page name, string
    @return decoded page name, unicode
    """
    # NOTE(review): loop and try/except structure reconstructed - several
    # lines are missing in this copy; verify against upstream.
    # Split to pages and decode each one
    pages = name.split('/')
    decoded = []
    for page in pages:
        # Recode from utf-8 into config charset. If the path
        # contains user typed parts, they are encoded using 'utf-8'.
        if config.charset != 'utf-8':
            try:
                page = unicode(page, 'utf-8', 'strict')
                # Fit data into config.charset, replacing what won't
                # fit. Better have few "?" in the name than crash.
                page = page.encode(config.charset, 'replace')
            except UnicodeError:
                pass

        # Decode from config.charset, replacing what can't be decoded.
        page = unicode(page, config.charset, 'replace')
        decoded.append(page)

    # Assemble decoded parts
    name = u'/'.join(decoded)
    return name
971
def normalizePagename(self, name):
    """ Normalize page name

    Prevent creating page names with invisible characters or funny
    whitespace that might confuse the users or abuse the wiki, or
    just does not make sense.

    Restrict even more group pages, so they can be used inside acl lines.

    @param name: page name, unicode
    @return: decoded and sanitized page name
    """
    # Strip invalid characters
    name = config.page_invalid_chars_regex.sub(u'', name)

    # Split to pages and normalize each one
    pages = name.split(u'/')
    normalized = []
    # NOTE(review): loop header / accumulator reconstructed - missing lines.
    for page in pages:
        # Ignore empty or whitespace only pages
        if not page or page.isspace():
            continue

        # Cleanup group pages.
        # Strip non alpha numeric characters, keep white space
        if wikiutil.isGroupPage(self, page):
            page = u''.join([c for c in page
                             if c.isalnum() or c.isspace()])

        # Normalize white space. Each name can contain multiple
        # words separated with only one space. Split handle all
        # 30 unicode spaces (isspace() == True)
        page = u' '.join(page.split())

        normalized.append(page)

    # Assemble components into full pagename
    name = u'/'.join(normalized)
    return name
1013
def read(self, n=None):
    """ Read n bytes from input stream. Must be implemented by sub classes. """
    # NOTE(review): def line reconstructed (signature guessed) - verify upstream.
    raise NotImplementedError
def flush(self):
    """ Flush output stream. Must be implemented by sub classes. """
    # NOTE(review): def line and body reconstructed - missing in this copy.
    raise NotImplementedError
def check_spider(self):
    """ check if the user agent for current request is a spider/bot

    @return: True if the user-agent matches cfg.cache.ua_spiders
    """
    # NOTE(review): default and return reconstructed - missing lines.
    isSpider = False
    ua = self.getUserAgent()
    if ua and self.cfg.cache.ua_spiders:
        isSpider = self.cfg.cache.ua_spiders.search(ua) is not None
    return isSpider
1028
def isForbidden(self):
    """ check for web spiders and refuse anything except viewing """
    # NOTE(review): init and the tail of the condition reconstructed -
    # lines missing in this copy; verify the clause list against upstream.
    forbidden = 0
    # we do not have a parsed query string here, so we can just do simple matching
    qs = self.query_string
    action = self.action
    if ((qs != '' or self.request_method != 'GET') and
        action != 'rss_rc' and
        # allow spiders to get attachments and do 'show'
        not (action == 'AttachFile' and 'do=get' in qs) and
        action != 'show'
        ):
        forbidden = self.isSpiderAgent

    if not forbidden and self.cfg.hosts_deny:
        ip = self.remote_addr
        for host in self.cfg.hosts_deny:
            if host[-1] == '.' and ip.startswith(host):
                forbidden = 1
                logging.debug("hosts_deny (net): %s" % str(forbidden))
                break
            if ip == host:
                forbidden = 1
                logging.debug("hosts_deny (ip): %s" % str(forbidden))
                break
    return forbidden
1056
def setup_args(self):
    """ Return args dict

    First, we parse the query string (usually this is used in GET methods,
    but TwikiDraw uses ?action=AttachFile&do=savedrawing plus posted stuff).
    Second, we update what we got in first step by the stuff we get from
    the form (or by a POST). We invoke _setup_args_from_cgi_form to handle
    possible file uploads.

    @return: dict of decoded arguments, each value a list
    """
    args = cgi.parse_qs(self.query_string, keep_blank_values=1)
    args = self.decodeArgs(args)
    # if we have form data (in a POST), those override the stuff we already have:
    if self.request_method == 'POST':
        postargs = self._setup_args_from_cgi_form()
        args.update(postargs)
    return args
1072
def _setup_args_from_cgi_form(self, form=None):
    """ Return args dict from a FieldStorage

    Create the args from a given form. Each key contain a list of values.
    This method usually gets overridden in classes derived from this - it
    is their task to call this method with an appropriate form parameter.

    @param form: a cgi.FieldStorage
    @return: dict with form keys, each contains a list of values
    """
    # NOTE(review): default form handling and loop scaffolding reconstructed -
    # several lines are missing in this copy; verify against upstream.
    if form is None:
        form = cgi.FieldStorage()
    args = {}
    for key in form:
        values = form[key]
        if not isinstance(values, list):
            values = [values]
        fixedResult = []
        for item in values:
            if isinstance(item, cgi.FieldStorage) and item.filename:
                fixedResult.append(item.file) # open data tempfile
                # Save upload file name in a separate key
                args[key + '__filename__'] = item.filename
            else:
                fixedResult.append(item.value)
        args[key] = fixedResult

    return self.decodeArgs(args)
1100
def decodeArgs(self, args):
1101
""" Decode args dict
1103
Decoding is done in a separate path because it is reused by
1104
other methods and sub classes.
1106
decode = wikiutil.decodeUserInput
1109
if key + '__filename__' in args:
1110
# Copy file data as is
1111
result[key] = args[key]
1112
elif key.endswith('__filename__'):
1113
result[key] = decode(args[key], self.decode_charsets)
1115
result[key] = [decode(value, self.decode_charsets) for value in args[key]]
1118
def getBaseURL(self):
    """ Return the fully qualified URL of this script.

    Qualifies the script name (server-rooted path) with scheme and host
    via getQualifiedURL.
    """
    script = self.getScriptname()
    return self.getQualifiedURL(script)
1122
def getQualifiedURL(self, uri=''):
1123
""" Return an absolute URL starting with schema and host.
1125
Already qualified urls are returned unchanged.
1127
@param uri: server rooted uri e.g /scriptname/pagename.
1128
It must start with a slash. Must be ascii and url encoded.
1131
scheme = urlparse.urlparse(uri)[0]
1135
scheme = ('http', 'https')[self.is_ssl]
1136
result = "%s://%s%s" % (scheme, self.http_host, uri)
1138
# This might break qualified urls in redirects!
1139
# e.g. mapping 'http://netloc' -> '/'
1140
return wikiutil.mapURL(self, result)
1142
def getUserAgent(self):
    """ Return the client's User-Agent header value.

    @return: the raw http_user_agent string as stored on the request
             (may be empty if the client sent no User-Agent header)
    """
    return self.http_user_agent
1146
def makeForbidden(self, resultcode, msg):
1148
401: 'Authorization required',
1151
503: 'Service unavailable',
1154
'Status: %d %s' % (resultcode, statusmsg[resultcode]),
1155
'Content-Type: text/plain; charset=utf-8'
1157
# when surge protection triggered, tell bots to come back later...
1158
if resultcode == 503:
1159
headers.append('Retry-After: %d' % self.cfg.surge_lockout_time)
1160
self.emit_http_headers(headers)
1162
self.forbidden = True
1164
def makeForbidden403(self):
    """ Refuse the request with a 403 Forbidden response.

    Convenience wrapper around makeForbidden with a fixed status/message.
    """
    self.makeForbidden(403, 'You are not allowed to access this!\r\n')
1167
def makeUnavailable503(self):
    """ Refuse the request with a 503 Service Unavailable response.

    Used when surge protection has triggered; the message tells the
    client to slow down before retrying.
    """
    msg = ("Warning:\r\n"
           "You triggered the wiki's surge protection by doing too many requests in a short time.\r\n"
           "Please make a short break reading the stuff you already got.\r\n"
           "When you restart doing requests AFTER that, slow down or you might get locked out for a longer time!\r\n")
    self.makeForbidden(503, msg)
1173
def initTheme(self):
1174
""" Set theme - forced theme, user theme or wiki default """
1175
if self.cfg.theme_force:
1176
theme_name = self.cfg.theme_default
1178
theme_name = self.user.theme_name
1179
self.loadTheme(theme_name)
1181
def _try_redirect_spaces_page(self, pagename):
1182
if '_' in pagename and not self.page.exists():
1183
pname = pagename.replace('_', ' ')
1184
pg = Page(self, pname)
1187
self.http_redirect(url)
1192
# Exit now if __init__ failed or request is forbidden
1193
if self.failed or self.forbidden or self._auth_redirected:
1194
# Don't sleep() here, it binds too much of our resources!
1195
return self.finish()
1198
self.clock.start('run')
1202
action_name = self.action
1203
if self.cfg.log_timing:
1204
self.timing_log(True, action_name)
1206
if action_name == 'xmlrpc':
1207
from MoinMoin import xmlrpc
1208
if self.query_string == 'action=xmlrpc':
1210
elif self.query_string == 'action=xmlrpc2':
1211
xmlrpc.xmlrpc2(self)
1212
if self.cfg.log_timing:
1213
self.timing_log(False, action_name)
1214
return self.finish()
1216
# parse request data
1218
# The last component in path_info is the page name, if any
1219
path = self.getPathinfo()
1221
# we can have all action URLs like this: /action/ActionName/PageName?action=ActionName&...
1222
# this is just for robots.txt being able to forbid them for crawlers
1223
prefix = self.cfg.url_prefix_action
1224
if prefix is not None:
1225
prefix = '/%s/' % prefix # e.g. '/action/'
1226
if path.startswith(prefix):
1227
# remove prefix and action name
1228
path = path[len(prefix):]
1229
action, path = (path.split('/', 1) + ['', ''])[:2]
1232
if path.startswith('/'):
1233
pagename = self.normalizePagename(path)
1237
# need to inform caches that content changes based on:
1238
# * cookie (even if we aren't sending one now)
1239
# * User-Agent (because a bot might be denied and get no content)
1240
# * Accept-Language (except if moin is told to ignore browser language)
1241
if self.cfg.language_ignore_browser:
1242
self.setHttpHeader("Vary: Cookie,User-Agent")
1244
self.setHttpHeader("Vary: Cookie,User-Agent,Accept-Language")
1246
# Handle request. We have these options:
1247
# 1. jump to page where user left off
1248
if not pagename and self.user.remember_last_visit and action_name == 'show':
1249
pagetrail = self.user.getTrail()
1251
# Redirect to last page visited
1252
last_visited = pagetrail[-1]
1253
wikiname, pagename = wikiutil.split_interwiki(last_visited)
1254
if wikiname != 'Self':
1255
wikitag, wikiurl, wikitail, error = wikiutil.resolve_interwiki(self, wikiname, pagename)
1256
url = wikiurl + wikiutil.quoteWikinameURL(wikitail)
1258
url = Page(self, pagename).url(self)
1260
# Or to localized FrontPage
1261
url = wikiutil.getFrontPage(self).url(self)
1262
self.http_redirect(url)
1263
return self.finish()
1267
# pagename could be empty after normalization e.g. '///' -> ''
1268
# Use localized FrontPage if pagename is empty
1270
self.page = wikiutil.getFrontPage(self)
1272
self.page = Page(self, pagename)
1273
if self._try_redirect_spaces_page(pagename):
1274
return self.finish()
1277
# Complain about unknown actions
1278
if not action_name in self.getKnownActions():
1279
msg = _("Unknown action %(action_name)s.") % {
1280
'action_name': wikiutil.escape(action_name), }
1282
# Disallow non available actions
1283
elif action_name[0].isupper() and not action_name in self.getAvailableActions(self.page):
1284
msg = _("You are not allowed to do %(action_name)s on this page.") % {
1285
'action_name': wikiutil.escape(action_name), }
1286
if not self.user.valid:
1287
# Suggest non valid user to login
1288
msg += " " + _("Login and try again.")
1291
self.theme.add_msg(msg, "error")
1292
self.page.send_page()
1295
from MoinMoin import action
1296
handler = action.getHandler(self, action_name)
1298
msg = _("You are not allowed to do %(action_name)s on this page.") % {
1299
'action_name': wikiutil.escape(action_name), }
1300
if not self.user.valid:
1301
# Suggest non valid user to login
1302
msg += " " + _("Login and try again.")
1303
self.theme.add_msg(msg, "error")
1304
self.page.send_page()
1306
handler(self.page.page_name, self)
1308
# every action that didn't use to raise MoinMoinFinish must call this now:
1309
# self.theme.send_closing_html()
1311
except MoinMoinFinish:
1313
except RemoteClosedConnection:
1317
raise # fcgi uses this to terminate a thread
1318
except Exception, err:
1320
# nothing we can do about further failures!
1325
if self.cfg.log_timing:
1326
self.timing_log(False, action_name)
1328
return self.finish()
1330
def http_redirect(self, url):
    """ Redirect to a fully qualified, or server-rooted URL.

    Relative/server-rooted urls are qualified with scheme and host first,
    then a 302 response is emitted.

    @param url: relative or absolute url, ascii using url encoding.
    """
    target = self.getQualifiedURL(url)
    headers = ["Status: 302 Found", "Location: %s" % target]
    self.emit_http_headers(headers)
1338
def emit_http_headers(self, more_headers=[], testing=False):
1339
""" emit http headers after some preprocessing / checking
1341
Makes sure we only emit headers once.
1342
Encodes to ASCII if it gets unicode headers.
1343
Make sure we have exactly one Content-Type and one Status header.
1344
Make sure Status header string begins with a integer number.
1346
For emitting (testing == False), it calls the server specific
1347
_emit_http_headers method. For testing, it returns the result.
1349
@param more_headers: list of additional header strings
1350
@param testing: set to True by test code
1352
user_headers = self.user_headers
1353
self.user_headers = []
1354
tracehere = ''.join(traceback.format_stack()[:-1])
1355
all_headers = [(hdr, tracehere) for hdr in more_headers] + user_headers
1357
if self.sent_headers:
1358
# Send headers only once
1359
logging.error("Attempt to send headers twice!")
1360
logging.error("First attempt:\n%s" % self.sent_headers)
1361
logging.error("Second attempt:\n%s" % tracehere)
1362
raise HeadersAlreadySentException("emit_http_headers has already been called before!")
1364
self.sent_headers = tracehere
1366
# assemble dict of http headers
1369
for header, trace in all_headers:
1370
if isinstance(header, unicode):
1371
header = header.encode('ascii')
1372
key, value = header.split(':', 1)
1374
value = value.lstrip()
1376
if lkey in ['vary', 'cache-control', 'content-language', ]:
1377
# these headers (list might be incomplete) allow multiple values
1378
# that can be merged into a comma separated list
1379
headers[lkey] = headers[lkey][0], '%s, %s' % (headers[lkey][1], value)
1380
traces[lkey] = trace
1382
logging.warning("Duplicate http header: %r (ignored)" % header)
1383
logging.warning("Header added first at:\n%s" % traces[lkey])
1384
logging.warning("Header added again at:\n%s" % trace)
1386
headers[lkey] = (key, value)
1387
traces[lkey] = trace
1389
if 'content-type' not in headers:
1390
headers['content-type'] = ('Content-type', 'text/html; charset=%s' % config.charset)
1392
if 'status' not in headers:
1393
headers['status'] = ('Status', '200 OK')
1395
# check if we got a valid status
1397
status = headers['status'][1]
1398
int(status.split(' ', 1)[0])
1400
logging.error("emit_http_headers called with invalid header Status: %r" % status)
1401
headers['status'] = ('Status', '500 Server Error - invalid status header')
1403
header_format = '%s: %s'
1404
st_header = header_format % headers['status']
1405
del headers['status']
1406
ct_header = header_format % headers['content-type']
1407
del headers['content-type']
1409
headers = [header_format % kv_tuple for kv_tuple in headers.values()] # make a list of strings
1410
headers = [st_header, ct_header] + headers # do NOT change order!
1412
self._emit_http_headers(headers)
1416
def _emit_http_headers(self, headers):
1417
""" server specific method to emit http headers.
1419
@param headers: a list of http header strings in this FIXED order:
1420
1. status header (always present and valid, e.g. "200 OK")
1421
2. content type header (always present)
1422
3. other headers (optional)
1424
raise NotImplementedError
1426
def setHttpHeader(self, header):
    """ Save a header line for later sending.

    Attention: although we use a list here, some implementations use a dict,
    thus multiple calls with the same header type do NOT work in the end!

    @param header: complete header line, e.g. "Content-Type: text/plain"
    """
    # keep a traceback alongside the header so duplicate-header warnings
    # can report where each header was added
    added_at = ''.join(traceback.format_stack()[:-1])
    self.user_headers.append((header, added_at))
1435
def fail(self, err):
    """ Fail when we can't continue.

    Send 500 status code with the error name. Reference:
    http://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html#sec6.1.1

    Log the error, then let failure module handle it.

    @param err: Exception instance or subclass.
    """
    self.failed = 1  # remembered by self.run() so it bails out early
    # never emit the headers a second time
    if not self.sent_headers:
        self.emit_http_headers(['Status: 500 MoinMoin Internal Error'])
    # imported here to avoid import cycles at module load time
    from MoinMoin import failure
    failure.handle(self, err)
1452
def make_unique_id(self, base, namespace=None):
1454
Generates a unique ID using a given base name. Appends a running count to the base.
1456
Needs to stay deterministic!
1458
@param base: the base of the id
1460
@param namespace: the namespace for the ID, used when including pages
1462
@returns: a unique (relatively to the namespace) ID
1465
if not isinstance(base, unicode):
1466
base = unicode(str(base), 'ascii', 'ignore')
1467
if not namespace in self._page_ids:
1468
self._page_ids[namespace] = {}
1469
count = self._page_ids[namespace].get(base, -1) + 1
1470
self._page_ids[namespace][base] = count
1473
return u'%s-%d' % (base, count)
1475
def init_unique_ids(self):
    """ Initialise everything needed for unique IDs. """
    self._unique_id_stack = []      # saved (page_ids, include_id) snapshots
    self._page_ids = {None: {}}     # namespace -> {base -> running count}
    self.include_id = None          # current include prefix, if any
    self._include_stack = []        # per-include (include_id, page_ids copy)
1482
def push_unique_ids(self):
1484
Used by the TOC macro, this ensures that the ID namespaces
1485
are reset to the status when the current include started.
1486
This guarantees that doing the ID enumeration twice results
1487
in the same results, on any level.
1489
self._unique_id_stack.append((self._page_ids, self.include_id))
1490
self.include_id, pids = self._include_stack[-1]
1491
# make a copy of the containing ID namespaces, that is to say
1492
# go back to the level we had at the previous include
1494
for namespace in pids:
1495
self._page_ids[namespace] = pids[namespace].copy()
1497
def pop_unique_ids(self):
    """ Restore the ID state saved by push_unique_ids().

    Used by the TOC macro to reset the ID namespaces after having
    parsed the page for TOC generation.
    """
    saved = self._unique_id_stack.pop()
    self._page_ids, self.include_id = saved
1505
def begin_include(self, base):
1507
Called by the formatter when a document begins, which means
1508
that include causing nested documents gives us an include
1509
stack in self._include_id_stack.
1512
for namespace in self._page_ids:
1513
pids[namespace] = self._page_ids[namespace].copy()
1514
self._include_stack.append((self.include_id, pids))
1515
self.include_id = self.make_unique_id(base)
1516
# if it's the page name then set it to None so we don't
1517
# prepend anything to IDs, but otherwise keep it.
1518
if self.page and self.page.page_name == self.include_id:
1519
self.include_id = None
1521
def end_include(self):
    """ Restore include state when a nested document ends.

    Called by the formatter when a document ends; restores the current
    include ID to the previous one and discards the page IDs state that
    was kept around for push_unique_ids().
    """
    previous_id, _pids = self._include_stack.pop()
    self.include_id = previous_id
1529
def httpDate(self, when=None, rfc='1123'):
1530
""" Returns http date string, according to rfc2068
1532
See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-3.3
1534
A http 1.1 server should use only rfc1123 date, but cookie's
1535
"expires" field should use the older obsolete rfc850 date.
1537
Note: we can not use strftime() because that honors the locale
1538
and rfc2822 requires english day and month names.
1540
We can not use email.Utils.formatdate because it formats the
1541
zone as '-0000' instead of 'GMT', and creates only rfc1123
1542
dates. This is a modified version of email.Utils.formatdate
1545
@param when: seconds from epoch, as returned by time.time()
1546
@param rfc: conform to rfc ('1123' or '850')
1548
@return: http date conforming to rfc1123 or rfc850
1552
now = time.gmtime(when)
1553
month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
1554
'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now.tm_mon - 1]
1556
day = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now.tm_wday]
1557
date = '%02d %s %04d' % (now.tm_mday, month, now.tm_year)
1559
day = ["Monday", "Tuesday", "Wednesday", "Thursday",
1560
"Friday", "Saturday", "Sunday"][now.tm_wday]
1561
date = '%02d-%s-%s' % (now.tm_mday, month, str(now.tm_year)[-2:])
1563
raise ValueError("Invalid rfc value: %s" % rfc)
1565
return '%s, %s %02d:%02d:%02d GMT' % (day, date, now.tm_hour,
1566
now.tm_min, now.tm_sec)
1568
def disableHttpCaching(self, level=1):
1569
""" Prevent caching of pages that should not be cached.
1571
level == 1 means disabling caching when we have a cookie set
1572
level == 2 means completely disabling caching (used by Page*Editor)
1574
This is important to prevent caches break acl by providing one
1575
user pages meant to be seen only by another user, when both users
1576
share the same caching proxy.
1578
AVOID using no-cache and no-store for attachments as it is completely broken on IE!
1580
Details: http://support.microsoft.com/support/kb/articles/Q234/0/67.ASP
1582
if level <= self.http_caching_disabled:
1583
return # only make caching stricter
1586
# Set Cache control header for http 1.1 caches
1587
# See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2109.html#sec-4.2.3
1588
# and http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-14.9
1589
#self.setHttpHeader('Cache-Control: no-cache="set-cookie", private, max-age=0')
1590
self.setHttpHeader('Cache-Control: private, must-revalidate, max-age=10')
1592
self.setHttpHeader('Cache-Control: no-cache')
1594
# only do this once to avoid 'duplicate header' warnings
1595
# (in case the caching disabling is being made stricter)
1596
if not self.http_caching_disabled:
1597
# Set Expires for http 1.0 caches (does not support Cache-Control)
1598
when = time.time() - (3600 * 24 * 365)
1599
self.setHttpHeader('Expires: %s' % self.httpDate(when=when))
1601
# Set Pragma for http 1.0 caches
1602
# See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-14.32
1603
# DISABLED for level == 1 to fix IE https file attachment downloading trouble.
1605
self.setHttpHeader('Pragma: no-cache')
1607
self.http_caching_disabled = level
1610
""" General cleanup on end of request
1612
Delete circular references - all object that we create using self.name = class(self).
1613
This helps Python to collect these objects and keep our memory footprint lower.
1615
for method in self._finishers:
1617
# only execute finishers once
1618
self._finishers = []
1621
#del self.user # keeping this is useful for testing
1627
def add_finisher(self, method):
    """ Register a callable to be invoked once when the request finishes.

    @param method: callable to run at finish time
    """
    self._finishers.append(method)
1630
# Debug ------------------------------------------------------------
1632
def debugEnvironment(self, env):
1633
""" Environment debugging aid """
1634
# Keep this one name per line so its easy to comment stuff
1636
# 'http_accept_language',
1639
# 'http_user_agent',
1654
attributes.append(' %s = %r\n' % (name, getattr(self, name, None)))
1655
attributes = ''.join(attributes)
1661
environment.append(' %s = %r\n' % (key, env[key]))
1662
environment = ''.join(environment)
1664
data = '\nRequest Attributes\n%s\nEnvironment\n%s' % (attributes, environment)
1665
f = open('/tmp/env.log', 'a')