Coverage for pygments.formatters.other: 72%

# -*- coding: utf-8 -*-
"""
    pygments.formatters.other
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Other formatters: NullFormatter, RawTokenFormatter.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.token import Token
from pygments.util import get_choice_opt
from pygments.console import colorize

__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
""" Output the text unchanged without any formatting. """
else:
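# Example (illustrative sketch, not part of the original module): since
# NullFormatter writes the token text verbatim, highlighting a snippet with
# it returns the input unchanged.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import NullFormatter
#
#   code = "print('hello')\n"
#   assert highlight(code, PythonLexer(), NullFormatter()) == code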
r""" Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later be converted to a token stream with the `RawTokenLexer`, described in the :doc:`lexer list <lexers>`.
Only two options are accepted:
`compress` If set to ``'gz'`` or ``'bz2'``, compress the output with the given compression algorithm after encoding (default: ``''``). `error_color` If set to a color name, highlight error tokens using that color. If set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # We ignore self.encoding if it is set, since it gets set for lexer
        # and formatter if given with -Oencoding on the command line.
        # The RawTokenFormatter outputs only ASCII. Override here.
        self.encoding = 'ascii'
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            try:
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)
    def format(self, tokensource, outfile):
        try:
            outfile.write(b'')
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')

        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)

            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)

            def write(text):
                outfile.write(compressor.compress(text.encode()))

            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush
        if self.error_color:
            for ttype, value in tokensource:
                line = "%s\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    write(colorize(self.error_color, line))
                else:
                    write(line)
        else:
            for ttype, value in tokensource:
                write("%s\t%r\n" % (ttype, value))
        flush()
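# Example (illustrative sketch, not part of the original module): dumping a
# token stream to a binary buffer; the dump can later be turned back into
# tokens with RawTokenLexer. Options such as compress='gz' or
# error_color='red' are passed to the constructor the same way.
#
#   import io
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import RawTokenFormatter
#
#   buf = io.BytesIO()  # must be binary -- see the TypeError check above
#   highlight("print('hi')\n", PythonLexer(), RawTokenFormatter(), buf)
#   print(buf.getvalue().decode('ascii'))  # tokentype<TAB>repr(value) lines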
TESTCASE_BEFORE = u'''\
    def testNeedsName(self):
        fragment = %r
        tokens = [
'''
TESTCASE_AFTER = u'''\
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
'''
""" Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0 """
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        if self.encoding is not None and self.encoding != 'utf-8':
            raise ValueError("Only None and utf-8 are allowed encodings.")

    def format(self, tokensource, outfile):
        indentation = ' ' * 12
        rawbuf = []
        outbuf = []
        for ttype, value in tokensource:
            rawbuf.append(value)
            outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))

        before = TESTCASE_BEFORE % (u''.join(rawbuf),)
        during = u''.join(outbuf)
        after = TESTCASE_AFTER
        if self.encoding is None:
            outfile.write(before + during + after)
        else:
            outfile.write(before.encode('utf-8'))
            outfile.write(during.encode('utf-8'))
            outfile.write(after.encode('utf-8'))
        outfile.flush()
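# Example (illustrative sketch, not part of the original module): generating
# a ready-made unittest body for a snippet.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import TestcaseFormatter
#
#   print(highlight("x = 1\n", PythonLexer(), TestcaseFormatter()))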