1
# -*- coding: utf-8 -*-
3
pygments.lexers.templates
4
~~~~~~~~~~~~~~~~~~~~~~~~~
6
Lexers for various template engines' markup.
8
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
9
:license: BSD, see LICENSE for details.
14
import re

from pygments.lexers.web import \
     PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexers.compiled import JavaLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
     include, using, this
from pygments.token import Error, Punctuation, \
     Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
24
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
25
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
26
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
27
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
28
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
29
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
30
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
31
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
32
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
33
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MakoLexer',
34
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
35
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
36
'CheetahXmlLexer', 'CheetahJavascriptLexer',
37
'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer',
38
'ColdfusionLexer', 'ColdfusionHtmlLexer']
41
class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights ruby code between the preprocessor directives, other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        from pygments.lexers.agile import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of ruby
        blocks we have to use a split approach here that fails for
        that too.
        """
        tokens = self._block_re.split(text)
        tokens.reverse()
        state = idx = 0
        try:
            while True:
                # text outside of any tag
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals: "<%%" and "%%>" are escaped delimiters
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment tag
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    # a closing tag without an opening one is an error
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            # token list exhausted -- all input consumed
            return

    def analyse_text(text):
        if '<%' in text and '%>' in text:
            return 0.4
136
class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights smarty code between the preprocessor directives, other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # anything outside "{...}" is passed through untouched
            (r'[^{]+', Other),
            # {* ... *} comment
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            # {php} ... {/php} embedded PHP block
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            # {tag ...} / {/tag ...}
            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
            # raw string is required here: in a plain string '\b' is a
            # backspace character, not a word-boundary assertion
            (r'(true|false|null)\b', Keyword.Constant),
            # NOTE(review): '(eE[+-][0-9])?' looks like it was meant to be
            # '([eE][+-]?[0-9]+)?'; kept as-is to preserve behavior -- confirm
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        # additive heuristic scoring typical Smarty constructs
        rv = 0.0
        if re.search('\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search('\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search('\{\$.*?\}', text):
            rv += 0.01
        return rv
191
class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comment blocks
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z][a-zA-Z0-9_]*', Name.Variable),
            (r'\.[a-zA-Z0-9_]+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
            # NOTE(review): '(eE[+-][0-9])?' matches literal "eE"; probably
            # meant '([eE][+-]?[0-9]+)?' -- kept as-is to preserve behavior
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
274
class MyghtyLexer(RegexLexer):
276
Generic `myghty templates`_ lexer. Code that isn't Myghty
277
markup is yielded as `Token.Other`.
279
*New in Pygments 0.6.*
281
.. _myghty templates: http://www.myghty.org/
286
filenames = ['*.myt', 'autodelegate']
287
mimetypes = ['application/x-myghty']
292
(r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
293
bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
294
using(this), Name.Tag)),
295
(r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
296
bygroups(Name.Tag, None, Name.Function, Name.Tag,
297
using(PythonLexer), Name.Tag)),
298
(r'(<&[^|])(.*?)(,.*?)?(&>)',
299
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
300
(r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
301
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
303
(r'(<%!?)(.*?)(%>)(?s)',
304
bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
305
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
306
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
307
bygroups(Name.Tag, using(PythonLexer), Other)),
309
(.+?) # anything, followed by:
311
(?<=\n)(?=[%#]) | # an eval or comment line
312
(?=</?[%&]) | # a substitution or block or
315
(\\\n) | # an escaped newline
317
)""", bygroups(Other, Operator)),
322
class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
                                              **options)
339
class MyghtyXmlLexer(DelegatingLexer):
341
Subclass of the `MyghtyLexer` that highlights unlexer data
344
*New in Pygments 0.6.*
348
aliases = ['xml+myghty']
349
mimetypes = ['application/xml+myghty']
351
def __init__(self, **options):
352
super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
356
class MyghtyJavascriptLexer(DelegatingLexer):
358
Subclass of the `MyghtyLexer` that highlights unlexer data
359
with the `JavascriptLexer`.
361
*New in Pygments 0.6.*
364
name = 'JavaScript+Myghty'
365
aliases = ['js+myghty', 'javascript+myghty']
366
mimetypes = ['application/x-javascript+myghty',
367
'text/x-javascript+myghty',
368
'text/javascript+mygthy']
370
def __init__(self, **options):
371
super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
372
MyghtyLexer, **options)
375
class MyghtyCssLexer(DelegatingLexer):
377
Subclass of the `MyghtyLexer` that highlights unlexer data
380
*New in Pygments 0.6.*
384
aliases = ['css+myghty']
385
mimetypes = ['text/css+myghty']
387
def __init__(self, **options):
388
super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
392
class MakoLexer(RegexLexer):
394
Generic `mako templates`_ lexer. Code that isn't Mako
395
markup is yielded as `Token.Other`.
397
*New in Pygments 0.7.*
399
.. _mako templates: http://www.makotemplates.org/
404
filenames = ['*.mao']
405
mimetypes = ['application/x-mako']
409
(r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
410
bygroups(Text, Comment.Preproc, Keyword, Other)),
411
(r'(\s*)(%)([^\n]*)(\n|\Z)',
412
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
413
(r'(\s*)(##[^\n]*)(\n|\Z)',
414
bygroups(Text, Comment.Preproc, Other)),
415
(r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
417
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
418
(r'(</%)([\w\.\:]+)(>)',
419
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
420
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
421
(r'(<%(?:!?))(.*?)(%>)(?s)',
422
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
424
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
426
(.+?) # anything, followed by:
428
(?<=\n)(?=%|\#\#) | # an eval or comment line
429
(?=\#\*) | # multiline comment
430
(?=</?%) | # a python block
432
(?=\$\{) | # a substitution
435
(\\\n) | # an escaped newline
438
''', bygroups(Other, Operator)),
442
(r'<%', Comment.Preproc),
443
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
447
(r'((?:\w+)\s*=)\s*(".*?")',
448
bygroups(Name.Attribute, String)),
449
(r'/?\s*>', Comment.Preproc, '#pop'),
453
('".*?"', String, '#pop'),
454
("'.*?'", String, '#pop'),
455
(r'[^\s>]+', String, '#pop'),
460
class MakoHtmlLexer(DelegatingLexer):
462
Subclass of the `MakoLexer` that highlights unlexed data
463
with the `HtmlLexer`.
465
*New in Pygments 0.7.*
469
aliases = ['html+mako']
470
mimetypes = ['text/html+mako']
472
def __init__(self, **options):
473
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
476
class MakoXmlLexer(DelegatingLexer):
478
Subclass of the `MakoLexer` that highlights unlexer data
481
*New in Pygments 0.7.*
485
aliases = ['xml+mako']
486
mimetypes = ['application/xml+mako']
488
def __init__(self, **options):
489
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
492
class MakoJavascriptLexer(DelegatingLexer):
494
Subclass of the `MakoLexer` that highlights unlexer data
495
with the `JavascriptLexer`.
497
*New in Pygments 0.7.*
500
name = 'JavaScript+Mako'
501
aliases = ['js+mako', 'javascript+mako']
502
mimetypes = ['application/x-javascript+mako',
503
'text/x-javascript+mako',
504
'text/javascript+mako']
506
def __init__(self, **options):
507
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
508
MakoLexer, **options)
510
class MakoCssLexer(DelegatingLexer):
512
Subclass of the `MakoLexer` that highlights unlexer data
515
*New in Pygments 0.7.*
519
aliases = ['css+mako']
520
mimetypes = ['text/css+mako']
522
def __init__(self, **options):
523
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
527
# Genshi and Cheetah lexers courtesy of Matt Good.
529
class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        # delegate to a plain Python lexer, but reclassify the bare "$"
        # (an error token in Python) as a Cheetah preprocessor marker
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value
542
class CheetahLexer(RegexLexer):
544
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
545
markup is yielded as `Token.Other`. This also works for
546
`spitfire templates`_ which use the same syntax.
548
.. _cheetah templates: http://www.cheetahtemplate.org/
549
.. _spitfire templates: http://code.google.com/p/spitfire/
553
aliases = ['cheetah', 'spitfire']
554
filenames = ['*.tmpl', '*.spt']
555
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
560
(bygroups(Comment))),
561
(r'#[*](.|\n)*?[*]#', Comment),
562
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
563
(r'#slurp$', Comment.Preproc),
564
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
565
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
567
# TODO support other Python syntax like $foo['bar']
568
(r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
569
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
570
(r'(\$\{!?)(.*?)(\})(?s)',
571
bygroups(Comment.Preproc, using(CheetahPythonLexer),
574
(.+?) # anything, followed by:
576
(?=[#][#a-zA-Z]*) | # an eval comment
577
(?=\$[a-zA-Z_{]) | # a substitution
586
class CheetahHtmlLexer(DelegatingLexer):
588
Subclass of the `CheetahLexer` that highlights unlexer data
589
with the `HtmlLexer`.
592
name = 'HTML+Cheetah'
593
aliases = ['html+cheetah', 'html+spitfire']
594
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
596
def __init__(self, **options):
597
super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
601
class CheetahXmlLexer(DelegatingLexer):
603
Subclass of the `CheetahLexer` that highlights unlexer data
608
aliases = ['xml+cheetah', 'xml+spitfire']
609
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
611
def __init__(self, **options):
612
super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
616
class CheetahJavascriptLexer(DelegatingLexer):
618
Subclass of the `CheetahLexer` that highlights unlexer data
619
with the `JavascriptLexer`.
622
name = 'JavaScript+Cheetah'
623
aliases = ['js+cheetah', 'javascript+cheetah',
624
'js+spitfire', 'javascript+spitfire']
625
mimetypes = ['application/x-javascript+cheetah',
626
'text/x-javascript+cheetah',
627
'text/javascript+cheetah',
628
'application/x-javascript+spitfire',
629
'text/x-javascript+spitfire',
630
'text/javascript+spitfire']
632
def __init__(self, **options):
633
super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
634
CheetahLexer, **options)
637
class GenshiTextLexer(RegexLexer):
639
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
644
aliases = ['genshitext']
645
mimetypes = ['application/x-genshi-text', 'text/x-genshi']
649
(r'[^#\$\s]+', Other),
650
(r'^(\s*)(##.*)$', bygroups(Text, Comment)),
651
(r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
656
(r'\n', Text, '#pop'),
657
(r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
658
(r'(choose|when|with)([^\S\n]+)(.*)',
659
bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
660
(r'(choose|otherwise)\b', Keyword, '#pop'),
661
(r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
664
(r'(?<!\$)(\$\{)(.+?)(\})',
665
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
666
(r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
672
class GenshiMarkupLexer(RegexLexer):
674
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
683
(r'(<\?python)(.*?)(\?>)',
684
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
685
# yield style and script blocks as Other
686
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
687
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
688
(r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
694
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
695
(r'/?\s*>', Name.Tag, '#pop'),
698
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
699
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
700
(r'[^\s>]+', String, '#pop'),
704
(r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
705
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
706
(r'/?\s*>', Name.Tag, '#pop'),
709
('"', String, 'attr-dstring'),
710
("'", String, 'attr-sstring'),
711
(r'[^\s>]*', String, '#pop')
714
('"', String, '#pop'),
719
("'", String, '#pop'),
724
('[^"\'$]+', String),
728
(r'(?<!\$)(\$\{)(.+?)(\})',
729
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
730
(r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
736
class HtmlGenshiLexer(DelegatingLexer):
738
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
739
`kid <http://kid-templating.org/>`_ kid HTML templates.
743
aliases = ['html+genshi', 'html+kid']
744
alias_filenames = ['*.html', '*.htm', '*.xhtml']
745
mimetypes = ['text/html+genshi']
747
def __init__(self, **options):
748
super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
751
def analyse_text(text):
753
if re.search('\$\{.*?\}', text) is not None:
755
if re.search('py:(.*?)=["\']', text) is not None:
757
return rv + HtmlLexer.analyse_text(text) - 0.01
760
class GenshiLexer(DelegatingLexer):
762
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
763
`kid <http://kid-templating.org/>`_ kid XML templates.
767
aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
768
filenames = ['*.kid']
769
alias_filenames = ['*.xml']
770
mimetypes = ['application/x-genshi', 'application/x-kid']
772
def __init__(self, **options):
773
super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
776
def analyse_text(text):
778
if re.search('\$\{.*?\}', text) is not None:
780
if re.search('py:(.*?)=["\']', text) is not None:
782
return rv + XmlLexer.analyse_text(text) - 0.01
785
class JavascriptGenshiLexer(DelegatingLexer):
787
A lexer that highlights javascript code in genshi text templates.
790
name = 'JavaScript+Genshi Text'
791
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
793
alias_filenames = ['*.js']
794
mimetypes = ['application/x-javascript+genshi',
795
'text/x-javascript+genshi',
796
'text/javascript+genshi']
798
def __init__(self, **options):
799
super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
803
def analyse_text(text):
804
return GenshiLexer.analyse_text(text) - 0.05
807
class CssGenshiLexer(DelegatingLexer):
809
A lexer that highlights CSS definitions in genshi text templates.
812
name = 'CSS+Genshi Text'
813
aliases = ['css+genshitext', 'css+genshi']
814
alias_filenames = ['*.css']
815
mimetypes = ['text/css+genshi']
817
def __init__(self, **options):
818
super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
821
def analyse_text(text):
822
return GenshiLexer.analyse_text(text) - 0.05
825
class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    html lexer.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv
850
class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+erb', 'xml+ruby']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv
871
class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+erb', 'css+ruby']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05
888
class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
                                                 **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05
909
class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv
935
class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv
955
class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05
972
class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['js+php', 'javascript+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
                                                 **options)

    def analyse_text(text):
        # no penalty here -- defers entirely to the PHP heuristic
        return PhpLexer.analyse_text(text)
993
class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv
1016
class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv
1037
class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05
1055
class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['js+smarty', 'javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
                                                    **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05
1076
class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv
1099
class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv
1120
class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05
1138
class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['js+django', 'javascript+django',
               'js+jinja', 'javascript+jinja']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
                                                    **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05
1163
class JspRootLexer(RegexLexer):
1165
Base for the `JspLexer`. Yields `Token.Other` for area outside of
1168
*New in Pygments 0.7.*
1173
(r'<%\S?', Keyword, 'sec'),
1174
# FIXME: I want to make these keywords but still parse attributes.
1175
(r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
1181
(r'%>', Keyword, '#pop'),
1182
# note: '\w\W' != '.' without DOTALL.
1183
(r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
1188
class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    *New in Pygments 0.7.*
    """

    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv
1211
class EvoqueLexer(RegexLexer):
1213
For files using the Evoque templating system.
1215
*New in Pygments 1.1.*
1218
aliases = ['evoque']
1219
filenames = ['*.evoque']
1220
mimetypes = ['application/x-evoque']
1227
(r'#\[', Comment.Multiline, 'comment'),
1230
(r'\$\w+:[^$\n]*\$', Comment.Multiline),
1231
# directives: begin, end
1232
(r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
1233
bygroups(Punctuation, Name.Builtin, Punctuation, None,
1234
String, Punctuation, None)),
1235
# directives: evoque, overlay
1236
# see doc for handling first name arg: /directives/evoque/
1237
#+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
1238
# should be using(PythonLexer), not passed out as String
1239
(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
1240
r'(.*?)((?(4)%)\})',
1241
bygroups(Punctuation, Name.Builtin, Punctuation, None,
1242
String, using(PythonLexer), Punctuation, None)),
1243
# directives: if, for, prefer, test
1244
(r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
1245
bygroups(Punctuation, Name.Builtin, Punctuation, None,
1246
using(PythonLexer), Punctuation, None)),
1247
# directive clauses (no {} expression)
1248
(r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
1250
(r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
1251
bygroups(Punctuation, None, using(PythonLexer),
1252
Name.Builtin, None, None, Punctuation, None)),
1256
(r'[^\]#]', Comment.Multiline),
1257
(r'#\[', Comment.Multiline, '#push'),
1258
(r'\]#', Comment.Multiline, '#pop'),
1259
(r'[\]#]', Comment.Multiline)
1263
class EvoqueHtmlLexer(DelegatingLexer):
1265
Subclass of the `EvoqueLexer` that highlights unlexed data with the
1268
*New in Pygments 1.1.*
1270
name = 'HTML+Evoque'
1271
aliases = ['html+evoque']
1272
filenames = ['*.html']
1273
mimetypes = ['text/html+evoque']
1275
def __init__(self, **options):
1276
super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
1279
class EvoqueXmlLexer(DelegatingLexer):
1281
Subclass of the `EvoqueLexer` that highlights unlexed data with the
1284
*New in Pygments 1.1.*
1287
aliases = ['xml+evoque']
1288
filenames = ['*.xml']
1289
mimetypes = ['application/xml+evoque']
1291
def __init__(self, **options):
1292
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
1295
class ColdfusionLexer(RegexLexer):
1297
Coldfusion statements
1299
name = 'cfstatement'
1303
flags = re.IGNORECASE | re.MULTILINE
1308
(r'\+\+|--', Operator),
1309
(r'[-+*/^&=!]', Operator),
1310
(r'<=|>=|<|>', Operator),
1311
(r'mod\b', Operator),
1312
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
1313
(r'\|\||&&', Operator),
1314
(r'"', String.Double, 'string'),
1315
# There is a special rule for allowing html in single quoted
1316
# strings, evidently.
1317
(r"'.*?'", String.Single),
1319
(r'(if|else|len|var|case|default|break|switch)\b', Keyword),
1320
(r'([A-Za-z_$][A-Za-z0-9_.]*)\s*(\()', bygroups(Name.Function, Punctuation)),
1321
(r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
1322
(r'[()\[\]{};:,.\\]', Punctuation),
1326
(r'""', String.Double),
1327
(r'#.+?#', String.Interp),
1328
(r'[^"#]+', String.Double),
1329
(r'#', String.Double),
1330
(r'"', String.Double, '#pop'),
1334
class ColdfusionMarkupLexer(RegexLexer):
1336
Coldfusion markup only
1347
(r'<[^<>]*', Other),
1350
(r'(?s)<!---.*?--->', Comment.Multiline),
1351
(r'(?s)<!--.*?-->', Comment),
1352
(r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
1353
(r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
1354
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
1355
# negative lookbehind is for strings with embedded >
1356
(r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
1357
r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
1358
r'mailpart|mail|header|content|zip|image|lock|argument|try|'
1359
r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
1360
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
1364
(r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
1366
#(r'<cfoutput.*?>', Name.Builtin, '#push'),
1367
(r'</cfoutput.*?>', Name.Builtin, '#pop'),
1369
(r'(?s)<[^<>]*', Other),
1375
class ColdfusionHtmlLexer(DelegatingLexer):
1377
Coldfusion markup in html
1379
name = 'Coldufsion HTML'
1381
filenames = ['*.cfm', '*.cfml', '*.cfc']
1382
mimetypes = ['application/x-coldfusion']
1384
def __init__(self, **options):
1385
super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,