Update ffsubsync and srt module

* Update ffsubsync to 0.4.11
* Update srt to 3.4.1
Michiel van Baak Jansen 2021-04-13 06:02:29 +02:00 committed by GitHub
parent 8e91beed83
commit 4a0932b5d3
368 changed files with 134230 additions and 2096 deletions

libs/auditok/__init__.py

@@ -2,18 +2,16 @@
:author:
Amine SEHILI <amine.sehili@gmail.com>
-2015-2018
+2015-2021
:License:
-This package is published under GNU GPL Version 3.
+This package is published under the MIT license.
"""
from __future__ import absolute_import
from .core import *
from .io import *
from .util import *
from . import dataset
from .exceptions import *
-__version__ = "0.1.8"
+__version__ = "0.2.0"

File diff suppressed because it is too large.

libs/auditok/cmdline_util.py

@@ -0,0 +1,126 @@
import sys
import logging
from collections import namedtuple
from . import workers
from .util import AudioDataSource
from .io import player_for
_AUDITOK_LOGGER = "AUDITOK_LOGGER"
KeywordArguments = namedtuple(
"KeywordArguments", ["io", "split", "miscellaneous"]
)
def make_kwargs(args_ns):
if args_ns.save_stream is None:
record = args_ns.plot or (args_ns.save_image is not None)
else:
record = False
try:
use_channel = int(args_ns.use_channel)
except (ValueError, TypeError):
use_channel = args_ns.use_channel
io_kwargs = {
"input": args_ns.input,
"audio_format": args_ns.input_format,
"max_read": args_ns.max_read,
"block_dur": args_ns.analysis_window,
"sampling_rate": args_ns.sampling_rate,
"sample_width": args_ns.sample_width,
"channels": args_ns.channels,
"use_channel": use_channel,
"save_stream": args_ns.save_stream,
"save_detections_as": args_ns.save_detections_as,
"export_format": args_ns.output_format,
"large_file": args_ns.large_file,
"frames_per_buffer": args_ns.frame_per_buffer,
"input_device_index": args_ns.input_device_index,
"record": record,
}
split_kwargs = {
"min_dur": args_ns.min_duration,
"max_dur": args_ns.max_duration,
"max_silence": args_ns.max_silence,
"drop_trailing_silence": args_ns.drop_trailing_silence,
"strict_min_dur": args_ns.strict_min_duration,
"energy_threshold": args_ns.energy_threshold,
}
miscellaneous = {
"echo": args_ns.echo,
"progress_bar": args_ns.progress_bar,
"command": args_ns.command,
"quiet": args_ns.quiet,
"printf": args_ns.printf,
"time_format": args_ns.time_format,
"timestamp_format": args_ns.timestamp_format,
}
return KeywordArguments(io_kwargs, split_kwargs, miscellaneous)
def make_logger(stderr=False, file=None, name=_AUDITOK_LOGGER):
if not stderr and file is None:
return None
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
if stderr:
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
if file is not None:
handler = logging.FileHandler(file, "w")
fmt = logging.Formatter("[%(asctime)s] | %(message)s")
handler.setFormatter(fmt)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
def initialize_workers(logger=None, **kwargs):
observers = []
reader = AudioDataSource(source=kwargs["input"], **kwargs)
if kwargs["save_stream"] is not None:
reader = workers.StreamSaverWorker(
reader,
filename=kwargs["save_stream"],
export_format=kwargs["export_format"],
)
reader.start()
if kwargs["save_detections_as"] is not None:
worker = workers.RegionSaverWorker(
kwargs["save_detections_as"],
kwargs["export_format"],
logger=logger,
)
observers.append(worker)
if kwargs["echo"]:
player = player_for(reader)
worker = workers.PlayerWorker(
player, progress_bar=kwargs["progress_bar"], logger=logger
)
observers.append(worker)
if kwargs["command"] is not None:
worker = workers.CommandLineWorker(
command=kwargs["command"], logger=logger
)
observers.append(worker)
if not kwargs["quiet"]:
print_format = (
kwargs["printf"]
.replace("\\n", "\n")
.replace("\\t", "\t")
.replace("\\r", "\r")
)
worker = workers.PrintWorker(
print_format, kwargs["time_format"], kwargs["timestamp_format"]
)
observers.append(worker)
return reader, observers
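
The three kwarg groups returned by make_kwargs are consumed together by the CLI entry point. A minimal sketch of that wiring (illustrative only; args_ns is assumed to be the Namespace produced by auditok's own argument parser):

# Sketch, not part of the diff: wire the kwarg groups the way the CLI does.
kwargs = make_kwargs(args_ns)
logger = make_logger(stderr=True)
reader, observers = initialize_workers(
    logger=logger, **kwargs.io, **kwargs.miscellaneous
)
tokenizer = workers.TokenizerWorker(
    reader, observers, logger=logger, **kwargs.split
)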

File diff suppressed because it is too large.

libs/auditok/dataset.py

@@ -1,18 +1,31 @@
"""
-This module contains links to audio files you can use for test purposes.
+This module contains links to audio files that can be used for test purposes.
.. autosummary::
:toctree: generated/
one_to_six_arabic_16000_mono_bc_noise
was_der_mensch_saet_mono_44100_lead_trail_silence
"""
import os
__all__ = ["one_to_six_arabic_16000_mono_bc_noise", "was_der_mensch_saet_mono_44100_lead_trail_silence"]
__all__ = [
"one_to_six_arabic_16000_mono_bc_noise",
"was_der_mensch_saet_mono_44100_lead_trail_silence",
]
_current_dir = os.path.dirname(os.path.realpath(__file__))
one_to_six_arabic_16000_mono_bc_noise = "{cd}{sep}data{sep}1to6arabic_\
16000_mono_bc_noise.wav".format(cd=_current_dir, sep=os.path.sep)
16000_mono_bc_noise.wav".format(
cd=_current_dir, sep=os.path.sep
)
"""A wave file that contains a pronunciation of Arabic numbers from 1 to 6"""
was_der_mensch_saet_mono_44100_lead_trail_silence = "{cd}{sep}data{sep}was_\
der_mensch_saet_das_wird_er_vielfach_ernten_44100Hz_mono_lead_trail_\
silence.wav".format(cd=_current_dir, sep=os.path.sep)
""" A wave file that contains a sentence between long leading and trailing periods of silence"""
silence.wav".format(
cd=_current_dir, sep=os.path.sep
)
"""A wave file that contains a sentence with a long leading and trailing silence"""

libs/auditok/exceptions.py

@@ -1,3 +1,41 @@
class DuplicateArgument(Exception):
pass
class TooSamllBlockDuration(ValueError):
"""Raised when block_dur results in a block_size smaller than one sample."""
def __init__(self, message, block_dur, sampling_rate):
self.block_dur = block_dur
self.sampling_rate = sampling_rate
super(TooSamllBlockDuration, self).__init__(message)
class TimeFormatError(Exception):
"""Raised when a duration formatting directive is unknown."""
class EndOfProcessing(Exception):
"""Raised within command line script's main function to jump to
postprocessing code."""
class AudioIOError(Exception):
"""Raised when a compressed audio file cannot be loaded or when trying
to read from a not yet open AudioSource"""
class AudioParameterError(AudioIOError):
"""Raised when one audio parameter is missing when loading raw data or
saving data to a format other than raw. Also raised when an audio
parameter has a wrong value."""
class AudioEncodingError(Exception):
"""Raised if audio data can not be encoded in the provided format"""
class AudioEncodingWarning(RuntimeWarning):
"""Raised if audio data can not be encoded in the provided format
but saved as wav.
"""

File diff suppressed because it is too large.

libs/auditok/plotting.py (new file, 150 lines)

@@ -0,0 +1,150 @@
import matplotlib.pyplot as plt
import numpy as np
AUDITOK_PLOT_THEME = {
"figure": {"facecolor": "#482a36", "alpha": 0.2},
"plot": {"facecolor": "#282a36"},
"energy_threshold": {
"color": "#e31f8f",
"linestyle": "--",
"linewidth": 1,
},
"signal": {"color": "#40d970", "linestyle": "-", "linewidth": 1},
"detections": {
"facecolor": "#777777",
"edgecolor": "#ff8c1a",
"linewidth": 1,
"alpha": 0.75,
},
}
def _make_time_axis(nb_samples, sampling_rate):
sample_duration = 1 / sampling_rate
x = np.linspace(0, sample_duration * (nb_samples - 1), nb_samples)
return x
def _plot_line(x, y, theme, xlabel=None, ylabel=None, **kwargs):
color = theme.get("color", theme.get("c"))
ls = theme.get("linestyle", theme.get("ls"))
lw = theme.get("linewidth", theme.get("lw"))
plt.plot(x, y, c=color, ls=ls, lw=lw, **kwargs)
plt.xlabel(xlabel, fontsize=8)
plt.ylabel(ylabel, fontsize=8)
def _plot_detections(subplot, detections, theme):
fc = theme.get("facecolor", theme.get("fc"))
ec = theme.get("edgecolor", theme.get("ec"))
ls = theme.get("linestyle", theme.get("ls"))
lw = theme.get("linewidth", theme.get("lw"))
alpha = theme.get("alpha")
for (start, end) in detections:
subplot.axvspan(start, end, fc=fc, ec=ec, ls=ls, lw=lw, alpha=alpha)
def plot(
audio_region,
scale_signal=True,
detections=None,
energy_threshold=None,
show=True,
figsize=None,
save_as=None,
dpi=120,
theme="auditok",
):
y = np.asarray(audio_region)
if len(y.shape) == 1:
y = y.reshape(1, -1)
nb_subplots, nb_samples = y.shape
sampling_rate = audio_region.sampling_rate
time_axis = _make_time_axis(nb_samples, sampling_rate)
if energy_threshold is not None:
eth_log10 = energy_threshold * np.log(10) / 10
amplitude_threshold = np.sqrt(np.exp(eth_log10))
else:
amplitude_threshold = None
if detections is None:
detections = []
else:
# The end of a detection corresponds to the end of its last sample, but
# to stay compatible with the time axis of the signal plot we want it to
# correspond to the *start* of that last sample.
detections = [
(start, end - (1 / sampling_rate)) for (start, end) in detections
]
if theme == "auditok":
theme = AUDITOK_PLOT_THEME
fig = plt.figure(figsize=figsize, dpi=dpi)
fig_theme = theme.get("figure", theme.get("fig", {}))
fig_fc = fig_theme.get("facecolor", fig_theme.get("ffc"))
fig_alpha = fig_theme.get("alpha", 1)
fig.patch.set_facecolor(fig_fc)
fig.patch.set_alpha(fig_alpha)
plot_theme = theme.get("plot", {})
plot_fc = plot_theme.get("facecolor", plot_theme.get("pfc"))
if nb_subplots > 2 and nb_subplots % 2 == 0:
nb_rows = nb_subplots // 2
nb_columns = 2
else:
nb_rows = nb_subplots
nb_columns = 1
for sid, samples in enumerate(y, 1):
ax = fig.add_subplot(nb_rows, nb_columns, sid)
ax.set_facecolor(plot_fc)
if scale_signal:
std = samples.std()
if std > 0:
mean = samples.mean()
std = samples.std()
samples = (samples - mean) / std
max_ = samples.max()
plt.ylim(-1.5 * max_, 1.5 * max_)
if amplitude_threshold is not None:
if scale_signal and std > 0:
amp_th = (amplitude_threshold - mean) / std
else:
amp_th = amplitude_threshold
eth_theme = theme.get("energy_threshold", theme.get("eth", {}))
_plot_line(
[time_axis[0], time_axis[-1]],
[amp_th] * 2,
eth_theme,
label="Detection threshold",
)
if sid == 1:
legend = plt.legend(
["Detection threshold"],
facecolor=fig_fc,
framealpha=0.1,
bbox_to_anchor=(0.0, 1.15, 1.0, 0.102),
loc=2,
)
legend = plt.gca().add_artist(legend)
signal_theme = theme.get("signal", {})
_plot_line(
time_axis,
samples,
signal_theme,
xlabel="Time (seconds)",
ylabel="Signal{}".format(" (scaled)" if scale_signal else ""),
)
detections_theme = theme.get("detections", {})
_plot_detections(ax, detections, detections_theme)
plt.title("Channel {}".format(sid), fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.tight_layout()
if save_as is not None:
plt.savefig(save_as, dpi=dpi)
if show:
plt.show()
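
The energy_threshold handling above converts a decibel value to a linear amplitude via exp/log. A quick standalone check of that identity (illustrative only, not part of the diff):

import numpy as np
# 10**(E/10) == exp(E * ln(10) / 10); the sqrt turns power into amplitude.
e_db = 50
amp = np.sqrt(np.exp(e_db * np.log(10) / 10))
assert np.isclose(amp, 10 ** (e_db / 20))  # ~316.23 for 50 dB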

libs/auditok/signal.py (new file, 179 lines)

@@ -0,0 +1,179 @@
"""
Module for basic audio signal processing and array operations.
.. autosummary::
:toctree: generated/
to_array
extract_single_channel
compute_average_channel
compute_average_channel_stereo
separate_channels
calculate_energy_single_channel
calculate_energy_multichannel
"""
from array import array as array_
import audioop
import math
FORMAT = {1: "b", 2: "h", 4: "i"}
_EPSILON = 1e-10
def to_array(data, sample_width, channels):
"""Extract individual channels of audio data and return a list of arrays of
numeric samples. This will always return a list of `array.array` objects
(one per channel) even if audio data is mono.
Parameters
----------
data : bytes
raw audio data.
sample_width : int
size in bytes of one audio sample (one channel considered).
channels : int
number of channels of audio data.
Returns
-------
samples_arrays : list
list of arrays of audio samples.
"""
fmt = FORMAT[sample_width]
if channels == 1:
return [array_(fmt, data)]
return separate_channels(data, fmt, channels)
def extract_single_channel(data, fmt, channels, selected):
samples = array_(fmt, data)
return samples[selected::channels]
def compute_average_channel(data, fmt, channels):
"""
Compute and return average channel of multi-channel audio data. If the
number of channels is 2, use :func:`compute_average_channel_stereo` (much
faster). This function uses the standard `array` module to convert `bytes` data
into an array of numeric values.
Parameters
----------
data : bytes
multi-channel audio data to mix down.
fmt : str
format (single character) to pass to `array.array` to convert `data`
into an array of samples. This should be "b" if audio data's sample width
is 1, "h" if it's 2 and "i" if it's 4.
channels : int
number of channels of audio data.
Returns
-------
mono_audio : bytes
mixed down audio data.
"""
all_channels = array_(fmt, data)
mono_channels = [
array_(fmt, all_channels[ch::channels]) for ch in range(channels)
]
avg_arr = array_(
fmt,
(round(sum(samples) / channels) for samples in zip(*mono_channels)),
)
return avg_arr
def compute_average_channel_stereo(data, sample_width):
"""Compute and return average channel of stereo audio data. This function
should be used when the number of channels is exactly 2 because in that
case we can use the standard `audioop` module, which is *much* faster than calling
:func:`compute_average_channel`.
Parameters
----------
data : bytes
2-channel audio data to mix down.
sample_width : int
size in bytes of one audio sample (one channel considered).
Returns
-------
mono_audio : bytes
mixed down audio data.
"""
fmt = FORMAT[sample_width]
arr = array_(fmt, audioop.tomono(data, sample_width, 0.5, 0.5))
return arr
def separate_channels(data, fmt, channels):
"""Create a list of arrays of audio samples (`array.array` objects), one for
each channel.
Parameters
----------
data : bytes
multi-channel audio data to mix down.
fmt : str
format (single character) to pass to `array.array` to convert `data`
into an array of samples. This should be "b" if audio data's sample width
is 1, "h" if it's 2 and "i" if it's 4.
channels : int
number of channels of audio data.
Returns
-------
channels_arr : list
list of audio channels, each as a standard `array.array`.
"""
all_channels = array_(fmt, data)
mono_channels = [
array_(fmt, all_channels[ch::channels]) for ch in range(channels)
]
return mono_channels
def calculate_energy_single_channel(data, sample_width):
"""Calculate the energy of mono audio data. Energy is computed as:
.. math:: energy = 20 \log_{10}\left(\sqrt{\frac{1}{N}\sum_{i=1}^{N}{a_i}^2}\right) % # noqa: W605
where `a_i` is the i-th audio sample and `N` is the number of audio samples
in data.
Parameters
----------
data : bytes
single-channel audio data.
sample_width : int
size in bytes of one audio sample.
Returns
-------
energy : float
energy of audio signal.
"""
energy_sqrt = max(audioop.rms(data, sample_width), _EPSILON)
return 20 * math.log10(energy_sqrt)
def calculate_energy_multichannel(x, sample_width, aggregation_fn=max):
"""Calculate the energy of multi-channel audio data. Energy is calculated
channel-wise. An aggregation function is applied to the resulting energies
(default: `max`). Also see :func:`calculate_energy_single_channel`.
Parameters
----------
x : iterable of bytes
audio data buffers, one single-channel buffer per channel.
sample_width : int
size in bytes of one audio sample (one channel considered).
aggregation_fn : callable, default: max
aggregation function to apply to the resulting per-channel energies.
Returns
-------
energy : float
aggregated energy of multi-channel audio signal.
"""
energies = (calculate_energy_single_channel(xi, sample_width) for xi in x)
return aggregation_fn(energies)
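
Illustrative use of the energy helper on synthetic data (not from the diff): a constant 16-bit signal of amplitude 1000 has an RMS of 1000, i.e. 60 dB.

from array import array
import math
from auditok.signal import calculate_energy_single_channel

data = array("h", [1000] * 480).tobytes()  # 480 mono samples, 2 bytes each
energy = calculate_energy_single_channel(data, sample_width=2)
assert math.isclose(energy, 20 * math.log10(1000))  # 60.0 dB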

libs/auditok/signal_numpy.py

@@ -0,0 +1,30 @@
import numpy as np
from .signal import (
compute_average_channel_stereo,
calculate_energy_single_channel,
calculate_energy_multichannel,
)
FORMAT = {1: np.int8, 2: np.int16, 4: np.int32}
def to_array(data, sample_width, channels):
fmt = FORMAT[sample_width]
if channels == 1:
return np.frombuffer(data, dtype=fmt).astype(np.float64)
return separate_channels(data, fmt, channels).astype(np.float64)
def extract_single_channel(data, fmt, channels, selected):
samples = np.frombuffer(data, dtype=fmt)
return np.asanyarray(samples[selected::channels], order="C")
def compute_average_channel(data, fmt, channels):
array = np.frombuffer(data, dtype=fmt).astype(np.float64)
return array.reshape(-1, channels).mean(axis=1).round().astype(fmt)
def separate_channels(data, fmt, channels):
array = np.frombuffer(data, dtype=fmt)
return np.asanyarray(array.reshape(-1, channels).T, order="C")
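
This module mirrors signal.py with numpy. A small equivalence check on interleaved stereo samples (a sketch, assuming the module imports as auditok.signal_numpy):

import numpy as np
from auditok import signal, signal_numpy

data = np.arange(8, dtype=np.int16).tobytes()  # interleaved L R L R ...
py_channels = signal.separate_channels(data, "h", channels=2)
np_channels = signal_numpy.separate_channels(data, np.int16, channels=2)
assert [list(ch) for ch in py_channels] == np_channels.tolist()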

File diff suppressed because it is too large.

libs/auditok/workers.py (new file, 427 lines)

@@ -0,0 +1,427 @@
import os
import sys
from tempfile import NamedTemporaryFile
from abc import ABCMeta, abstractmethod
from threading import Thread
from datetime import datetime, timedelta
from collections import namedtuple
import wave
import subprocess
from queue import Queue, Empty
from .io import _guess_audio_format
from .util import AudioDataSource, make_duration_formatter
from .core import split
from .exceptions import (
EndOfProcessing,
AudioEncodingError,
AudioEncodingWarning,
)
_STOP_PROCESSING = "STOP_PROCESSING"
_Detection = namedtuple("_Detection", "id start end duration")
def _run_subprocess(command):
try:
with subprocess.Popen(
command,
stdin=open(os.devnull, "rb"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
stdout, stderr = proc.communicate()
return proc.returncode, stdout, stderr
except Exception:
err_msg = "Couldn't export audio using command: '{}'".format(command)
raise AudioEncodingError(err_msg)
class Worker(Thread, metaclass=ABCMeta):
def __init__(self, timeout=0.5, logger=None):
self._timeout = timeout
self._logger = logger
self._inbox = Queue()
Thread.__init__(self)
def run(self):
while True:
message = self._get_message()
if message == _STOP_PROCESSING:
break
if message is not None:
self._process_message(message)
self._post_process()
@abstractmethod
def _process_message(self, message):
"""Process incoming messages"""
def _post_process(self):
pass
def _log(self, message):
self._logger.info(message)
def _stop_requested(self):
try:
message = self._inbox.get_nowait()
if message == _STOP_PROCESSING:
return True
except Empty:
return False
def stop(self):
self.send(_STOP_PROCESSING)
self.join()
def send(self, message):
self._inbox.put(message)
def _get_message(self):
try:
message = self._inbox.get(timeout=self._timeout)
return message
except Empty:
return None
class TokenizerWorker(Worker, AudioDataSource):
def __init__(self, reader, observers=None, logger=None, **kwargs):
self._observers = observers if observers is not None else []
self._reader = reader
self._audio_region_gen = split(self, **kwargs)
self._detections = []
self._log_format = "[DET]: Detection {0.id} (start: {0.start:.3f}, "
self._log_format += "end: {0.end:.3f}, duration: {0.duration:.3f})"
Worker.__init__(self, timeout=0.2, logger=logger)
def _process_message(self):
pass
@property
def detections(self):
return self._detections
def _notify_observers(self, message):
for observer in self._observers:
observer.send(message)
def run(self):
self._reader.open()
start_processing_timestamp = datetime.now()
for _id, audio_region in enumerate(self._audio_region_gen, start=1):
timestamp = start_processing_timestamp + timedelta(
seconds=audio_region.meta.start
)
audio_region.meta.timestamp = timestamp
detection = _Detection(
_id,
audio_region.meta.start,
audio_region.meta.end,
audio_region.duration,
)
self._detections.append(detection)
if self._logger is not None:
message = self._log_format.format(detection)
self._log(message)
self._notify_observers((_id, audio_region))
self._notify_observers(_STOP_PROCESSING)
self._reader.close()
def start_all(self):
for observer in self._observers:
observer.start()
self.start()
def stop_all(self):
self.stop()
for observer in self._observers:
observer.stop()
self._reader.close()
def read(self):
if self._stop_requested():
return None
else:
return self._reader.read()
def __getattr__(self, name):
return getattr(self._reader, name)
class StreamSaverWorker(Worker):
def __init__(
self,
audio_reader,
filename,
export_format=None,
cache_size_sec=0.5,
timeout=0.2,
):
self._reader = audio_reader
sample_size_bytes = self._reader.sw * self._reader.ch
self._cache_size = cache_size_sec * self._reader.sr * sample_size_bytes
self._output_filename = filename
self._export_format = _guess_audio_format(export_format, filename)
if self._export_format is None:
self._export_format = "wav"
self._init_output_stream()
self._exported = False
self._cache = []
self._total_cached = 0
Worker.__init__(self, timeout=timeout)
def _get_non_existent_filename(self):
filename = self._output_filename + ".wav"
i = 0
while os.path.exists(filename):
i += 1
filename = self._output_filename + "({}).wav".format(i)
return filename
def _init_output_stream(self):
if self._export_format != "wav":
self._tmp_output_filename = self._get_non_existent_filename()
else:
self._tmp_output_filename = self._output_filename
self._wfp = wave.open(self._tmp_output_filename, "wb")
self._wfp.setframerate(self._reader.sr)
self._wfp.setsampwidth(self._reader.sw)
self._wfp.setnchannels(self._reader.ch)
@property
def sr(self):
return self._reader.sampling_rate
@property
def sw(self):
return self._reader.sample_width
@property
def ch(self):
return self._reader.channels
def __del__(self):
self._post_process()
if (
(self._tmp_output_filename != self._output_filename)
and self._exported
and os.path.exists(self._tmp_output_filename)
):
os.remove(self._tmp_output_filename)
def _process_message(self, data):
self._cache.append(data)
self._total_cached += len(data)
if self._total_cached >= self._cache_size:
self._write_cached_data()
def _post_process(self):
while True:
try:
data = self._inbox.get_nowait()
if data != _STOP_PROCESSING:
self._cache.append(data)
self._total_cached += len(data)
except Empty:
break
self._write_cached_data()
self._wfp.close()
def _write_cached_data(self):
if self._cache:
data = b"".join(self._cache)
self._wfp.writeframes(data)
self._cache = []
self._total_cached = 0
def open(self):
self._reader.open()
def close(self):
self._reader.close()
self.stop()
def rewind(self):
# ensure compatibility with AudioDataSource with record=True
pass
@property
def data(self):
with wave.open(self._tmp_output_filename, "rb") as wfp:
return wfp.readframes(-1)
def save_stream(self):
if self._exported:
return self._output_filename
if self._export_format in ("raw", "wav"):
if self._export_format == "raw":
self._export_raw()
self._exported = True
return self._output_filename
try:
self._export_with_ffmpeg_or_avconv()
except AudioEncodingError:
try:
self._export_with_sox()
except AudioEncodingError:
warn_msg = "Couldn't save audio data in the desired format "
warn_msg += "'{}'. Either none of 'ffmpeg', 'avconv' or 'sox' "
warn_msg += "is installed or this format is not recognized.\n"
warn_msg += "Audio file was saved as '{}'"
raise AudioEncodingWarning(
warn_msg.format(
self._export_format, self._tmp_output_filename
)
)
finally:
self._exported = True
return self._output_filename
def _export_raw(self):
with open(self._output_filename, "wb") as wfp:
wfp.write(self.data)
def _export_with_ffmpeg_or_avconv(self):
command = [
"-y",
"-f",
"wav",
"-i",
self._tmp_output_filename,
"-f",
self._export_format,
self._output_filename,
]
returncode, stdout, stderr = _run_subprocess(["ffmpeg"] + command)
if returncode != 0:
returncode, stdout, stderr = _run_subprocess(["avconv"] + command)
if returncode != 0:
raise AudioEncodingError(stderr)
return stdout, stderr
def _export_with_sox(self):
command = [
"sox",
"-t",
"wav",
self._tmp_output_filename,
self._output_filename,
]
returncode, stdout, stderr = _run_subprocess(command)
if returncode != 0:
raise AudioEncodingError(stderr)
return stdout, stderr
def close_output(self):
self._wfp.close()
def read(self):
data = self._reader.read()
if data is not None:
self.send(data)
else:
self.send(_STOP_PROCESSING)
return data
def __getattr__(self, name):
if name == "data":
return self.data
return getattr(self._reader, name)
class PlayerWorker(Worker):
def __init__(self, player, progress_bar=False, timeout=0.2, logger=None):
self._player = player
self._progress_bar = progress_bar
self._log_format = "[PLAY]: Detection {id} played"
Worker.__init__(self, timeout=timeout, logger=logger)
def _process_message(self, message):
_id, audio_region = message
if self._logger is not None:
message = self._log_format.format(id=_id)
self._log(message)
audio_region.play(
player=self._player, progress_bar=self._progress_bar, leave=False
)
class RegionSaverWorker(Worker):
def __init__(
self,
filename_format,
audio_format=None,
timeout=0.2,
logger=None,
**audio_parameters
):
self._filename_format = filename_format
self._audio_format = audio_format
self._audio_parameters = audio_parameters
self._debug_format = "[SAVE]: Detection {id} saved as '{filename}'"
Worker.__init__(self, timeout=timeout, logger=logger)
def _process_message(self, message):
_id, audio_region = message
filename = self._filename_format.format(
id=_id,
start=audio_region.meta.start,
end=audio_region.meta.end,
duration=audio_region.duration,
)
filename = audio_region.save(
filename, self._audio_format, **self._audio_parameters
)
if self._logger:
message = self._debug_format.format(id=_id, filename=filename)
self._log(message)
class CommandLineWorker(Worker):
def __init__(self, command, timeout=0.2, logger=None):
self._command = command
Worker.__init__(self, timeout=timeout, logger=logger)
self._debug_format = "[COMMAND]: Detection {id} command: '{command}'"
def _process_message(self, message):
_id, audio_region = message
with NamedTemporaryFile(delete=False) as file:
filename = audio_region.save(file.name, audio_format="wav")
command = self._command.format(file=filename)
os.system(command)
if self._logger is not None:
message = self._debug_format.format(id=_id, command=command)
self._log(message)
class PrintWorker(Worker):
def __init__(
self,
print_format="{start} {end}",
time_format="%S",
timestamp_format="%Y/%m/%d %H:%M:%S.%f",
timeout=0.2,
):
self._print_format = print_format
self._format_time = make_duration_formatter(time_format)
self._timestamp_format = timestamp_format
self.detections = []
Worker.__init__(self, timeout=timeout)
def _process_message(self, message):
_id, audio_region = message
timestamp = audio_region.meta.timestamp
timestamp = timestamp.strftime(self._timestamp_format)
text = self._print_format.format(
id=_id,
start=self._format_time(audio_region.meta.start),
end=self._format_time(audio_region.meta.end),
duration=self._format_time(audio_region.duration),
timestamp=timestamp,
)
print(text)
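
All of the workers above follow the same contract: subclass Worker, implement _process_message, and drive it with send()/stop(). A minimal hypothetical subclass to illustrate the loop (not part of the library):

class EchoWorker(Worker):
    # Hypothetical example for illustration only.
    def _process_message(self, message):
        print("received:", message)

w = EchoWorker(timeout=0.1)
w.start()
w.send("hello")
w.stop()  # enqueues _STOP_PROCESSING, then joins the thread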

libs/colorama/__init__.py

@@ -0,0 +1,6 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.4.4'

libs/colorama/ansi.py (new file, 102 lines)

@@ -0,0 +1,102 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\a'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
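
After instantiation, each attribute holds a ready-to-print escape string; for example Fore.RED is '\033[31m', built by code_to_chars(31). Typical use:

print(Fore.RED + 'error' + Fore.RESET)
print(Style.BRIGHT + 'emphasis' + Style.RESET_ALL)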

libs/colorama/ansitowin32.py

@@ -0,0 +1,258 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __enter__(self, *args, **kwargs):
# special method lookup bypasses __getattr__/__getattribute__, see
# https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
# thus, contextlib magic methods are not proxied via __getattr__
return self.__wrapped.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
return self.__wrapped.__exit__(*args, **kwargs)
def write(self, text):
self.__convertor.write(text)
def isatty(self):
stream = self.__wrapped
if 'PYCHARM_HOSTED' in os.environ:
if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
return True
try:
stream_isatty = stream.isatty
except AttributeError:
return False
else:
return stream_isatty()
@property
def closed(self):
stream = self.__wrapped
try:
return stream.closed
except AttributeError:
return True
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
self.strip = strip
# should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not self.stream.closed and self.stream.isatty()
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not self.stream.closed:
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command == BEL:
if paramstring.count(";") == 1:
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text

libs/colorama/initialise.py

@@ -0,0 +1,80 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = None
orig_stderr = None
wrapped_stdout = None
wrapped_stderr = None
atexit_done = False
def reset_all():
if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
global orig_stdout, orig_stderr
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if sys.stdout is None:
wrapped_stdout = None
else:
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
if sys.stderr is None:
wrapped_stderr = None
else:
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
if orig_stdout is not None:
sys.stdout = orig_stdout
if orig_stderr is not None:
sys.stderr = orig_stderr
@contextlib.contextmanager
def colorama_text(*args, **kwargs):
init(*args, **kwargs)
try:
yield
finally:
deinit()
def reinit():
if wrapped_stdout is not None:
sys.stdout = wrapped_stdout
if wrapped_stderr is not None:
sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream,
convert=convert, strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
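
Typical use of the API defined above, per colorama's documented behavior:

from colorama import init, deinit, Fore

init(autoreset=True)        # wrap stdout/stderr; reset style after each write
print(Fore.GREEN + "ok")    # no manual RESET needed with autoreset
deinit()                    # restore the original streams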

libs/colorama/win32.py (new file, 152 lines)

@@ -0,0 +1,152 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
def _winapi_test(handle):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = _GetStdHandle(stream_id)
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title)

libs/colorama/winterm.py (new file, 169 lines)

@@ -0,0 +1,169 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title)

libs/commonmark/__init__.py

@@ -0,0 +1,8 @@
# flake8: noqa
from __future__ import unicode_literals, absolute_import
from commonmark.main import commonmark
from commonmark.dump import dumpAST, dumpJSON
from commonmark.blocks import Parser
from commonmark.render.html import HtmlRenderer
from commonmark.render.rst import ReStructuredTextRenderer
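
The one-call helper re-exported above is commonmark's documented entry point; a minimal usage example:

import commonmark

html = commonmark.commonmark("*hello* world")
assert html == "<p><em>hello</em> world</p>\n"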

libs/commonmark/blocks.py (new file, 908 lines)

@@ -0,0 +1,908 @@
from __future__ import absolute_import, unicode_literals
import re
from commonmark import common
from commonmark.common import unescape_string
from commonmark.inlines import InlineParser
from commonmark.node import Node
CODE_INDENT = 4
reHtmlBlockOpen = [
re.compile(r'.'), # dummy for 0
re.compile(r'^<(?:script|pre|style)(?:\s|>|$)', re.IGNORECASE),
re.compile(r'^<!--'),
re.compile(r'^<[?]'),
re.compile(r'^<![A-Z]'),
re.compile(r'^<!\[CDATA\['),
re.compile(
r'^<[/]?(?:address|article|aside|base|basefont|blockquote|body|'
r'caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|'
r'fieldset|figcaption|figure|footer|form|frame|frameset|h1|head|'
r'header|hr|html|iframe|legend|li|link|main|menu|menuitem|'
r'nav|noframes|ol|optgroup|option|p|param|section|source|title|'
r'summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)'
r'(?:\s|[/]?[>]|$)',
re.IGNORECASE),
re.compile(
'^(?:' + common.OPENTAG + '|' + common.CLOSETAG + ')\\s*$',
re.IGNORECASE),
]
reHtmlBlockClose = [
re.compile(r'.'), # dummy for 0
re.compile(r'<\/(?:script|pre|style)>', re.IGNORECASE),
re.compile(r'-->'),
re.compile(r'\?>'),
re.compile(r'>'),
re.compile(r'\]\]>'),
]
reThematicBreak = re.compile(
r'^(?:(?:\*[ \t]*){3,}|(?:_[ \t]*){3,}|(?:-[ \t]*){3,})[ \t]*$')
reMaybeSpecial = re.compile(r'^[#`~*+_=<>0-9-]')
reNonSpace = re.compile(r'[^ \t\f\v\r\n]')
reBulletListMarker = re.compile(r'^[*+-]')
reOrderedListMarker = re.compile(r'^(\d{1,9})([.)])')
reATXHeadingMarker = re.compile(r'^#{1,6}(?:[ \t]+|$)')
reCodeFence = re.compile(r'^`{3,}(?!.*`)|^~{3,}')
reClosingCodeFence = re.compile(r'^(?:`{3,}|~{3,})(?= *$)')
reSetextHeadingLine = re.compile(r'^(?:=+|-+)[ \t]*$')
reLineEnding = re.compile(r'\r\n|\n|\r')
def is_blank(s):
"""Returns True if string contains only space characters."""
return re.search(reNonSpace, s) is None
def is_space_or_tab(s):
return s in (' ', '\t')
def peek(ln, pos):
if pos < len(ln):
return ln[pos]
else:
return None
def ends_with_blank_line(block):
""" Returns true if block ends with a blank line,
descending if needed into lists and sublists."""
while block:
if block.last_line_blank:
return True
if not block.last_line_checked and \
block.t in ('list', 'item'):
block.last_line_checked = True
block = block.last_child
else:
block.last_line_checked = True
break
return False
def parse_list_marker(parser, container):
""" Parse a list marker and return data on the marker (type,
start, delimiter, bullet character, padding) or None."""
rest = parser.current_line[parser.next_nonspace:]
data = {
'type': None,
'tight': True, # lists are tight by default
'bullet_char': None,
'start': None,
'delimiter': None,
'padding': None,
'marker_offset': parser.indent,
}
if parser.indent >= 4:
return None
m = re.search(reBulletListMarker, rest)
m2 = re.search(reOrderedListMarker, rest)
if m:
data['type'] = 'bullet'
data['bullet_char'] = m.group()[0]
elif m2 and (container.t != 'paragraph' or m2.group(1) == '1'):
m = m2
data['type'] = 'ordered'
data['start'] = int(m.group(1))
data['delimiter'] = m.group(2)
else:
return None
# make sure we have spaces after
nextc = peek(parser.current_line, parser.next_nonspace + len(m.group()))
if not (nextc is None or nextc == '\t' or nextc == ' '):
return None
# if it interrupts paragraph, make sure first line isn't blank
if container.t == 'paragraph' and \
not re.search(
reNonSpace,
parser.current_line[parser.next_nonspace + len(m.group()):]):
return None
# we've got a match! advance offset and calculate padding
parser.advance_next_nonspace() # to start of marker
parser.advance_offset(len(m.group()), True) # to end of marker
spaces_start_col = parser.column
spaces_start_offset = parser.offset
while True:
parser.advance_offset(1, True)
nextc = peek(parser.current_line, parser.offset)
if parser.column - spaces_start_col < 5 and \
is_space_or_tab(nextc):
pass
else:
break
blank_item = peek(parser.current_line, parser.offset) is None
spaces_after_marker = parser.column - spaces_start_col
if spaces_after_marker >= 5 or \
spaces_after_marker < 1 or \
blank_item:
data['padding'] = len(m.group()) + 1
parser.column = spaces_start_col
parser.offset = spaces_start_offset
if is_space_or_tab(peek(parser.current_line, parser.offset)):
parser.advance_offset(1, True)
else:
data['padding'] = len(m.group()) + spaces_after_marker
return data
def lists_match(list_data, item_data):
"""
Returns True if the two list items are of the same type,
with the same delimiter and bullet character. This is used
in agglomerating list items into lists.
"""
return list_data.get('type') == item_data.get('type') and \
list_data.get('delimiter') == item_data.get('delimiter') and \
list_data.get('bullet_char') == item_data.get('bullet_char')
class Block(object):
accepts_lines = None
@staticmethod
def continue_(parser=None, container=None):
return
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return
class Document(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
return 0
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return t != 'item'
class List(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
return 0
@staticmethod
def finalize(parser=None, block=None):
item = block.first_child
while item:
# check for non-final list item ending with blank line:
if ends_with_blank_line(item) and item.nxt:
block.list_data['tight'] = False
break
# recurse into children of list item, to see if there are
# spaces between any of them:
subitem = item.first_child
while subitem:
if ends_with_blank_line(subitem) and \
(item.nxt or subitem.nxt):
block.list_data['tight'] = False
break
subitem = subitem.nxt
item = item.nxt
@staticmethod
def can_contain(t):
return t == 'item'
class BlockQuote(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
ln = parser.current_line
if not parser.indented and peek(ln, parser.next_nonspace) == '>':
parser.advance_next_nonspace()
parser.advance_offset(1, False)
if is_space_or_tab(peek(ln, parser.offset)):
parser.advance_offset(1, True)
else:
return 1
return 0
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return t != 'item'
class Item(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
if parser.blank:
if container.first_child is None:
# Blank line after empty list item
return 1
else:
parser.advance_next_nonspace()
elif parser.indent >= (container.list_data['marker_offset'] +
container.list_data['padding']):
parser.advance_offset(
container.list_data['marker_offset'] +
container.list_data['padding'], True)
else:
return 1
return 0
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return t != 'item'
class Heading(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
# A heading can never contain more than one line, so fail to match:
return 1
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return False
class ThematicBreak(Block):
accepts_lines = False
@staticmethod
def continue_(parser=None, container=None):
# A thematic break can never contain more than one line, so fail to match:
return 1
@staticmethod
def finalize(parser=None, block=None):
return
@staticmethod
def can_contain(t):
return False
class CodeBlock(Block):
accepts_lines = True
@staticmethod
def continue_(parser=None, container=None):
ln = parser.current_line
indent = parser.indent
if container.is_fenced:
match = indent <= 3 and \
len(ln) >= parser.next_nonspace + 1 and \
ln[parser.next_nonspace] == container.fence_char and \
re.search(reClosingCodeFence, ln[parser.next_nonspace:])
if match and len(match.group()) >= container.fence_length:
# closing fence - we're at end of line, so we can return
parser.finalize(container, parser.line_number)
return 2
else:
# skip optional spaces of fence offset
i = container.fence_offset
while i > 0 and is_space_or_tab(peek(ln, parser.offset)):
parser.advance_offset(1, True)
i -= 1
else:
# indented
if indent >= CODE_INDENT:
parser.advance_offset(CODE_INDENT, True)
elif parser.blank:
parser.advance_next_nonspace()
else:
return 1
return 0
@staticmethod
def finalize(parser=None, block=None):
if block.is_fenced:
# first line becomes info string
content = block.string_content
newline_pos = content.index('\n')
first_line = content[0:newline_pos]
rest = content[newline_pos + 1:]
block.info = unescape_string(first_line.strip())
block.literal = rest
else:
# indented
block.literal = re.sub(r'(\n *)+$', '\n', block.string_content)
block.string_content = None
@staticmethod
def can_contain(t):
return False
class HtmlBlock(Block):
accepts_lines = True
@staticmethod
def continue_(parser=None, container=None):
if parser.blank and (container.html_block_type == 6 or
container.html_block_type == 7):
return 1
else:
return 0
@staticmethod
def finalize(parser=None, block=None):
block.literal = re.sub(r'(\n *)+$', '', block.string_content)
# allow GC
block.string_content = None
@staticmethod
def can_contain(t):
return False
class Paragraph(Block):
accepts_lines = True
@staticmethod
def continue_(parser=None, container=None):
return 1 if parser.blank else 0
@staticmethod
def finalize(parser=None, block=None):
has_reference_defs = False
# try parsing the beginning as link reference definitions:
while peek(block.string_content, 0) == '[':
pos = parser.inline_parser.parseReference(
block.string_content, parser.refmap)
if not pos:
break
block.string_content = block.string_content[pos:]
has_reference_defs = True
if has_reference_defs and is_blank(block.string_content):
block.unlink()
@staticmethod
def can_contain(t):
return False
class BlockStarts(object):
"""Block start functions.
Return values:
0 = no match
1 = matched container, keep going
2 = matched leaf, no more block starts
"""
METHODS = [
'block_quote',
'atx_heading',
'fenced_code_block',
'html_block',
'setext_heading',
'thematic_break',
'list_item',
'indented_code_block',
]
@staticmethod
def block_quote(parser, container=None):
if not parser.indented and \
peek(parser.current_line, parser.next_nonspace) == '>':
parser.advance_next_nonspace()
parser.advance_offset(1, False)
# optional following space
if is_space_or_tab(peek(parser.current_line, parser.offset)):
parser.advance_offset(1, True)
parser.close_unmatched_blocks()
parser.add_child('block_quote', parser.next_nonspace)
return 1
return 0
@staticmethod
def atx_heading(parser, container=None):
if not parser.indented:
m = re.search(reATXHeadingMarker,
parser.current_line[parser.next_nonspace:])
if m:
parser.advance_next_nonspace()
parser.advance_offset(len(m.group()), False)
parser.close_unmatched_blocks()
container = parser.add_child('heading', parser.next_nonspace)
# number of #s
container.level = len(m.group().strip())
# remove trailing ###s:
container.string_content = re.sub(
r'[ \t]+#+[ \t]*$', '', re.sub(
r'^[ \t]*#+[ \t]*$',
'',
parser.current_line[parser.offset:]))
parser.advance_offset(
len(parser.current_line) - parser.offset, False)
return 2
return 0
@staticmethod
def fenced_code_block(parser, container=None):
if not parser.indented:
m = re.search(
reCodeFence,
parser.current_line[parser.next_nonspace:])
if m:
fence_length = len(m.group())
parser.close_unmatched_blocks()
container = parser.add_child(
'code_block', parser.next_nonspace)
container.is_fenced = True
container.fence_length = fence_length
container.fence_char = m.group()[0]
container.fence_offset = parser.indent
parser.advance_next_nonspace()
parser.advance_offset(fence_length, False)
return 2
return 0
@staticmethod
def html_block(parser, container=None):
if not parser.indented and \
peek(parser.current_line, parser.next_nonspace) == '<':
s = parser.current_line[parser.next_nonspace:]
for block_type in range(1, 8):
if re.search(reHtmlBlockOpen[block_type], s) and \
(block_type < 7 or container.t != 'paragraph'):
parser.close_unmatched_blocks()
# We don't adjust parser.offset;
# spaces are part of the HTML block:
b = parser.add_child('html_block', parser.offset)
b.html_block_type = block_type
return 2
return 0
@staticmethod
def setext_heading(parser, container=None):
if not parser.indented and container.t == 'paragraph':
m = re.search(
reSetextHeadingLine,
parser.current_line[parser.next_nonspace:])
if m:
parser.close_unmatched_blocks()
            # resolve reference link definitions
while peek(container.string_content, 0) == '[':
pos = parser.inline_parser.parseReference(
container.string_content, parser.refmap)
if not pos:
break
container.string_content = container.string_content[pos:]
if container.string_content:
heading = Node('heading', container.sourcepos)
heading.level = 1 if m.group()[0] == '=' else 2
heading.string_content = container.string_content
container.insert_after(heading)
container.unlink()
parser.tip = heading
parser.advance_offset(
len(parser.current_line) - parser.offset, False)
return 2
else:
return 0
return 0
@staticmethod
def thematic_break(parser, container=None):
if not parser.indented and re.search(
reThematicBreak, parser.current_line[parser.next_nonspace:]):
parser.close_unmatched_blocks()
parser.add_child('thematic_break', parser.next_nonspace)
parser.advance_offset(
len(parser.current_line) - parser.offset, False)
return 2
return 0
@staticmethod
def list_item(parser, container=None):
if (not parser.indented or container.t == 'list'):
data = parse_list_marker(parser, container)
if data:
parser.close_unmatched_blocks()
# add the list if needed
if parser.tip.t != 'list' or \
not lists_match(container.list_data, data):
container = parser.add_child('list', parser.next_nonspace)
container.list_data = data
# add the list item
container = parser.add_child('item', parser.next_nonspace)
container.list_data = data
return 1
return 0
@staticmethod
def indented_code_block(parser, container=None):
if parser.indented and \
parser.tip.t != 'paragraph' and \
not parser.blank:
# indented code
parser.advance_offset(CODE_INDENT, True)
parser.close_unmatched_blocks()
parser.add_child('code_block', parser.offset)
return 2
return 0
class Parser(object):
def __init__(self, options={}):
self.doc = Node('document', [[1, 1], [0, 0]])
self.block_starts = BlockStarts()
self.tip = self.doc
self.oldtip = self.doc
self.current_line = ''
self.line_number = 0
self.offset = 0
self.column = 0
self.next_nonspace = 0
self.next_nonspace_column = 0
self.indent = 0
self.indented = False
self.blank = False
self.partially_consumed_tab = False
self.all_closed = True
self.last_matched_container = self.doc
self.refmap = {}
self.last_line_length = 0
self.inline_parser = InlineParser(options)
self.options = options
def add_line(self):
""" Add a line to the block at the tip. We assume the tip
can accept lines -- that check should be done before calling this."""
if self.partially_consumed_tab:
# Skip over tab
self.offset += 1
# Add space characters
chars_to_tab = 4 - (self.column % 4)
self.tip.string_content += (' ' * chars_to_tab)
self.tip.string_content += (self.current_line[self.offset:] + '\n')
def add_child(self, tag, offset):
""" Add block of type tag as a child of the tip. If the tip can't
accept children, close and finalize it and try its parent,
and so on til we find a block that can accept children."""
while not self.blocks[self.tip.t].can_contain(tag):
self.finalize(self.tip, self.line_number - 1)
column_number = offset + 1
new_block = Node(tag, [[self.line_number, column_number], [0, 0]])
new_block.string_content = ''
self.tip.append_child(new_block)
self.tip = new_block
return new_block
def close_unmatched_blocks(self):
"""Finalize and close any unmatched blocks."""
if not self.all_closed:
while self.oldtip != self.last_matched_container:
parent = self.oldtip.parent
self.finalize(self.oldtip, self.line_number - 1)
self.oldtip = parent
self.all_closed = True
def find_next_nonspace(self):
current_line = self.current_line
i = self.offset
cols = self.column
try:
c = current_line[i]
except IndexError:
c = ''
while c != '':
if c == ' ':
i += 1
cols += 1
elif c == '\t':
i += 1
cols += (4 - (cols % 4))
else:
break
try:
c = current_line[i]
except IndexError:
c = ''
self.blank = (c == '\n' or c == '\r' or c == '')
self.next_nonspace = i
self.next_nonspace_column = cols
self.indent = self.next_nonspace_column - self.column
self.indented = self.indent >= CODE_INDENT
def advance_next_nonspace(self):
self.offset = self.next_nonspace
self.column = self.next_nonspace_column
self.partially_consumed_tab = False
def advance_offset(self, count, columns):
current_line = self.current_line
try:
c = current_line[self.offset]
except IndexError:
c = None
while count > 0 and c is not None:
if c == '\t':
chars_to_tab = 4 - (self.column % 4)
if columns:
self.partially_consumed_tab = chars_to_tab > count
chars_to_advance = min(count, chars_to_tab)
self.column += chars_to_advance
self.offset += 0 if self.partially_consumed_tab else 1
count -= chars_to_advance
else:
self.partially_consumed_tab = False
self.column += chars_to_tab
self.offset += 1
count -= 1
else:
self.partially_consumed_tab = False
self.offset += 1
# assume ascii; block starts are ascii
self.column += 1
count -= 1
try:
c = current_line[self.offset]
except IndexError:
c = None
def incorporate_line(self, ln):
"""Analyze a line of text and update the document appropriately.
We parse markdown text by calling this on each line of input,
then finalizing the document.
"""
all_matched = True
container = self.doc
self.oldtip = self.tip
self.offset = 0
self.column = 0
self.blank = False
self.partially_consumed_tab = False
self.line_number += 1
# replace NUL characters for security
if re.search(r'\u0000', ln) is not None:
ln = re.sub(r'\0', '\uFFFD', ln)
self.current_line = ln
# For each containing block, try to parse the associated line start.
# Bail out on failure: container will point to the last matching block.
# Set all_matched to false if not all containers match.
while True:
last_child = container.last_child
if not (last_child and last_child.is_open):
break
container = last_child
self.find_next_nonspace()
rv = self.blocks[container.t].continue_(self, container)
if rv == 0:
# we've matched, keep going
pass
elif rv == 1:
# we've failed to match a block
all_matched = False
elif rv == 2:
# we've hit end of line for fenced code close and can return
self.last_line_length = len(ln)
return
else:
raise ValueError(
'continue_ returned illegal value, must be 0, 1, or 2')
if not all_matched:
# back up to last matching block
container = container.parent
break
self.all_closed = (container == self.oldtip)
self.last_matched_container = container
matched_leaf = container.t != 'paragraph' and \
self.blocks[container.t].accepts_lines
starts = self.block_starts
starts_len = len(starts.METHODS)
# Unless last matched container is a code block, try new container
# starts, adding children to the last matched container:
while not matched_leaf:
self.find_next_nonspace()
# this is a little performance optimization:
if not self.indented and \
not re.search(reMaybeSpecial, ln[self.next_nonspace:]):
self.advance_next_nonspace()
break
i = 0
while i < starts_len:
res = getattr(starts, starts.METHODS[i])(self, container)
if res == 1:
container = self.tip
break
elif res == 2:
container = self.tip
matched_leaf = True
break
else:
i += 1
if i == starts_len:
# nothing matched
self.advance_next_nonspace()
break
# What remains at the offset is a text line. Add the text to the
# appropriate container.
if not self.all_closed and not self.blank and \
self.tip.t == 'paragraph':
# lazy paragraph continuation
self.add_line()
else:
# not a lazy continuation
# finalize any blocks not matched
self.close_unmatched_blocks()
if self.blank and container.last_child:
container.last_child.last_line_blank = True
t = container.t
# Block quote lines are never blank as they start with >
# and we don't count blanks in fenced code for purposes of
# tight/loose lists or breaking out of lists. We also
# don't set last_line_blank on an empty list item, or if we
# just closed a fenced block.
last_line_blank = self.blank and \
not (t == 'block_quote' or
(t == 'code_block' and container.is_fenced) or
(t == 'item' and
not container.first_child and
container.sourcepos[0][0] == self.line_number))
# propagate last_line_blank up through parents:
cont = container
while cont:
cont.last_line_blank = last_line_blank
cont = cont.parent
if self.blocks[t].accepts_lines:
self.add_line()
# if HtmlBlock, check for end condition
if t == 'html_block' and \
container.html_block_type >= 1 and \
container.html_block_type <= 5 and \
re.search(
reHtmlBlockClose[container.html_block_type],
self.current_line[self.offset:]):
self.finalize(container, self.line_number)
elif self.offset < len(ln) and not self.blank:
# create a paragraph container for one line
container = self.add_child('paragraph', self.offset)
self.advance_next_nonspace()
self.add_line()
self.last_line_length = len(ln)
def finalize(self, block, line_number):
""" Finalize a block. Close it and do any necessary postprocessing,
e.g. creating string_content from strings, setting the 'tight'
or 'loose' status of a list, and parsing the beginnings
of paragraphs for reference definitions. Reset the tip to the
parent of the closed block."""
above = block.parent
block.is_open = False
block.sourcepos[1] = [line_number, self.last_line_length]
self.blocks[block.t].finalize(self, block)
self.tip = above
def process_inlines(self, block):
"""
Walk through a block & children recursively, parsing string content
into inline content where appropriate.
"""
walker = block.walker()
self.inline_parser.refmap = self.refmap
self.inline_parser.options = self.options
event = walker.nxt()
while event is not None:
node = event['node']
t = node.t
if not event['entering'] and (t == 'paragraph' or t == 'heading'):
self.inline_parser.parse(node)
event = walker.nxt()
def parse(self, my_input):
""" The main parsing function. Returns a parsed document AST."""
self.doc = Node('document', [[1, 1], [0, 0]])
self.tip = self.doc
self.refmap = {}
self.line_number = 0
self.last_line_length = 0
self.offset = 0
self.column = 0
self.last_matched_container = self.doc
self.current_line = ''
lines = re.split(reLineEnding, my_input)
length = len(lines)
if len(my_input) > 0 and my_input[-1] == '\n':
# ignore last blank line created by final newline
length -= 1
for i in range(length):
self.incorporate_line(lines[i])
while (self.tip):
self.finalize(self.tip, length)
self.process_inlines(self.doc)
return self.doc
CAMEL_RE = re.compile("(.)([A-Z](?:[a-z]+|(?<=[a-z0-9].)))")
Parser.blocks = dict(
(CAMEL_RE.sub(r'\1_\2', cls.__name__).lower(), cls)
for cls in Block.__subclasses__())
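The CAMEL_RE substitution above is what lets add_child('block_quote', ...) find the BlockQuote class: each Block subclass name is converted to a snake_case tag. A standalone sketch of that conversion (illustrative only, not part of the diff):

import re

CAMEL_RE = re.compile("(.)([A-Z](?:[a-z]+|(?<=[a-z0-9].)))")

for name in ("Document", "BlockQuote", "CodeBlock", "ThematicBreak"):
    # insert an underscore before each interior capitalized word, then lowercase
    print(CAMEL_RE.sub(r"\1_\2", name).lower())
# -> document, block_quote, code_block, thematic_break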

libs/commonmark/cmark.py Normal file
@@ -0,0 +1,53 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import argparse
import sys
import commonmark
def main():
parser = argparse.ArgumentParser(
description="Process Markdown according to "
"the CommonMark specification.")
if sys.version_info < (3, 0):
reload(sys) # noqa
sys.setdefaultencoding('utf-8')
parser.add_argument(
'infile',
nargs="?",
type=argparse.FileType('r'),
default=sys.stdin,
help="Input Markdown file to parse, defaults to STDIN")
parser.add_argument(
'-o',
nargs="?",
type=argparse.FileType('w'),
default=sys.stdout,
help="Output HTML/JSON file, defaults to STDOUT")
parser.add_argument('-a', action="store_true", help="Print formatted AST")
parser.add_argument('-aj', action="store_true", help="Output JSON AST")
args = parser.parse_args()
parser = commonmark.Parser()
f = args.infile
o = args.o
lines = []
for line in f:
lines.append(line)
data = "".join(lines)
ast = parser.parse(data)
if not args.a and not args.aj:
renderer = commonmark.HtmlRenderer()
o.write(renderer.render(ast))
exit()
if args.a:
# print ast
commonmark.dumpAST(ast)
exit()
# o.write(ast.to_JSON())
o.write(commonmark.dumpJSON(ast))
exit()
if __name__ == '__main__':
main()
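The argparse setup above gives cmark.py a small CLI; a few hedged invocation examples (file names are placeholders):

# Render Markdown to HTML on stdout:
#     python cmark.py README.md
# Write the HTML to a file instead:
#     python cmark.py README.md -o out.html
# Dump the formatted AST rather than HTML:
#     python cmark.py -a README.md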

libs/commonmark/common.py Normal file
@@ -0,0 +1,113 @@
from __future__ import absolute_import, unicode_literals
import re
import sys
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
if sys.version_info >= (3, 0):
if sys.version_info >= (3, 4):
import html
HTMLunescape = html.unescape
else:
from .entitytrans import _unescape
HTMLunescape = _unescape
else:
from commonmark import entitytrans
HTMLunescape = entitytrans._unescape
ENTITY = '&(?:#x[a-f0-9]{1,6}|#[0-9]{1,7}|[a-z][a-z0-9]{1,31});'
TAGNAME = '[A-Za-z][A-Za-z0-9-]*'
ATTRIBUTENAME = '[a-zA-Z_:][a-zA-Z0-9:._-]*'
UNQUOTEDVALUE = "[^\"'=<>`\\x00-\\x20]+"
SINGLEQUOTEDVALUE = "'[^']*'"
DOUBLEQUOTEDVALUE = '"[^"]*"'
ATTRIBUTEVALUE = "(?:" + UNQUOTEDVALUE + "|" + SINGLEQUOTEDVALUE + \
"|" + DOUBLEQUOTEDVALUE + ")"
ATTRIBUTEVALUESPEC = "(?:" + "\\s*=" + "\\s*" + ATTRIBUTEVALUE + ")"
ATTRIBUTE = "(?:" + "\\s+" + ATTRIBUTENAME + ATTRIBUTEVALUESPEC + "?)"
OPENTAG = "<" + TAGNAME + ATTRIBUTE + "*" + "\\s*/?>"
CLOSETAG = "</" + TAGNAME + "\\s*[>]"
HTMLCOMMENT = '<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->'
PROCESSINGINSTRUCTION = "[<][?].*?[?][>]"
DECLARATION = "<![A-Z]+" + "\\s+[^>]*>"
CDATA = '<!\\[CDATA\\[[\\s\\S]*?\\]\\]>'
HTMLTAG = "(?:" + OPENTAG + "|" + CLOSETAG + "|" + HTMLCOMMENT + "|" + \
PROCESSINGINSTRUCTION + "|" + DECLARATION + "|" + CDATA + ")"
reHtmlTag = re.compile('^' + HTMLTAG, re.IGNORECASE)
reBackslashOrAmp = re.compile(r'[\\&]')
ESCAPABLE = '[!"#$%&\'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]'
reEntityOrEscapedChar = re.compile(
'\\\\' + ESCAPABLE + '|' + ENTITY, re.IGNORECASE)
XMLSPECIAL = '[&<>"]'
reXmlSpecial = re.compile(XMLSPECIAL)
def unescape_char(s):
if s[0] == '\\':
return s[1]
else:
return HTMLunescape(s)
def unescape_string(s):
"""Replace entities and backslash escapes with literal characters."""
if re.search(reBackslashOrAmp, s):
return re.sub(
reEntityOrEscapedChar,
lambda m: unescape_char(m.group()),
s)
else:
return s
def normalize_uri(uri):
try:
return quote(uri.encode('utf-8'), safe=str('/@:+?=&()%#*,'))
except UnicodeDecodeError:
# Python 2 also throws a UnicodeDecodeError, complaining about
# the width of the "safe" string. Removing this parameter
        # solves the issue, but yields overly aggressive quoting; we
        # can correct those errors manually.
s = quote(uri.encode('utf-8'))
s = re.sub(r'%40', '@', s)
s = re.sub(r'%3A', ':', s)
s = re.sub(r'%2B', '+', s)
s = re.sub(r'%3F', '?', s)
s = re.sub(r'%3D', '=', s)
s = re.sub(r'%26', '&', s)
s = re.sub(r'%28', '(', s)
s = re.sub(r'%29', ')', s)
s = re.sub(r'%25', '%', s)
s = re.sub(r'%23', '#', s)
s = re.sub(r'%2A', '*', s)
s = re.sub(r'%2C', ',', s)
return s
UNSAFE_MAP = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
}
def replace_unsafe_char(s):
return UNSAFE_MAP.get(s, s)
def escape_xml(s):
if s is None:
return ''
if re.search(reXmlSpecial, s):
return re.sub(
reXmlSpecial,
lambda m: replace_unsafe_char(m.group()),
s)
else:
return s
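A quick sketch of how the helpers above behave, assuming this module is importable as commonmark.common (expected values shown in comments):

from commonmark.common import escape_xml, normalize_uri, unescape_string

print(unescape_string('\\*not emphasis\\*'))    # *not emphasis*
print(unescape_string('&amp;'))                 # &
print(escape_xml('a < b & "c"'))                # a &lt; b &amp; &quot;c&quot;
print(normalize_uri('http://example.com/a b'))  # http://example.com/a%20b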

libs/commonmark/dump.py Normal file
@@ -0,0 +1,108 @@
from __future__ import absolute_import, unicode_literals
from builtins import str
import json
from commonmark.node import is_container
def prepare(obj, topnode=False):
"""Walk the complete AST, only returning needed data.
This removes circular references and allows us to output
JSON.
"""
a = []
for subnode, entered in obj.walker():
rep = {
'type': subnode.t,
}
if subnode.literal:
rep['literal'] = subnode.literal
if subnode.string_content:
rep['string_content'] = subnode.string_content
if subnode.title:
rep['title'] = subnode.title
if subnode.info:
rep['info'] = subnode.info
if subnode.destination:
rep['destination'] = subnode.destination
if subnode.list_data:
rep['list_data'] = subnode.list_data
if is_container(subnode):
rep['children'] = []
if entered and len(a) > 0:
if a[-1]['children']:
a[-1]['children'].append(rep)
else:
a[-1]['children'] = [rep]
else:
a.append(rep)
return a
def dumpJSON(obj):
"""Output AST in JSON form, this is destructive of block."""
prepared = prepare(obj)
return json.dumps(prepared, indent=4, sort_keys=True)
def dumpAST(obj, ind=0, topnode=False):
"""Print out a block/entire AST."""
indChar = ("\t" * ind) + "-> " if ind else ""
print(indChar + "[" + obj.t + "]")
if not obj.title == "":
print("\t" + indChar + "Title: " + (obj.title or ''))
if not obj.info == "":
print("\t" + indChar + "Info: " + (obj.info or ''))
if not obj.destination == "":
print("\t" + indChar + "Destination: " + (obj.destination or ''))
if obj.is_open:
print("\t" + indChar + "Open: " + str(obj.is_open))
if obj.last_line_blank:
print(
"\t" + indChar + "Last line blank: " + str(obj.last_line_blank))
if obj.sourcepos:
print("\t" + indChar + "Sourcepos: " + str(obj.sourcepos))
if not obj.string_content == "":
print("\t" + indChar + "String content: " + (obj.string_content or ''))
if not obj.literal == "":
print("\t" + indChar + "Literal: " + (obj.literal or ''))
if obj.list_data.get('type'):
print("\t" + indChar + "List Data: ")
print("\t\t" + indChar + "[type] = " + obj.list_data.get('type'))
if obj.list_data.get('bullet_char'):
print(
"\t\t" + indChar + "[bullet_char] = " +
obj.list_data['bullet_char'])
if obj.list_data.get('start'):
print(
"\t\t" + indChar + "[start] = " +
str(obj.list_data.get('start')))
if obj.list_data.get('delimiter'):
print(
"\t\t" + indChar + "[delimiter] = " +
obj.list_data.get('delimiter'))
if obj.list_data.get('padding'):
print(
"\t\t" + indChar + "[padding] = " +
str(obj.list_data.get('padding')))
if obj.list_data.get('marker_offset'):
print(
"\t\t" + indChar + "[marker_offset] = " +
str(obj.list_data.get('marker_offset')))
if obj.walker:
print("\t" + indChar + "Children:")
walker = obj.walker()
nxt = walker.nxt()
while nxt is not None and topnode is False:
dumpAST(nxt['node'], ind + 2, topnode=True)
nxt = walker.nxt()
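A usage sketch for the two dump helpers, matching how cmark.py above calls them:

import commonmark

ast = commonmark.Parser().parse('*hello*')
print(commonmark.dumpJSON(ast))  # JSON list of {'type': ..., 'children': [...]}
commonmark.dumpAST(ast)          # indented plain-text dump of the same tree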

File diff suppressed because it is too large

libs/commonmark/inlines.py Normal file
@@ -0,0 +1,882 @@
from __future__ import absolute_import, unicode_literals, division
import re
import sys
from commonmark import common
from commonmark.common import normalize_uri, unescape_string
from commonmark.node import Node
from commonmark.normalize_reference import normalize_reference
if sys.version_info >= (3, 0):
if sys.version_info >= (3, 4):
import html
HTMLunescape = html.unescape
else:
from .entitytrans import _unescape
HTMLunescape = _unescape
else:
from commonmark import entitytrans
HTMLunescape = entitytrans._unescape
# Some regexps used in inline parser:
ESCAPED_CHAR = '\\\\' + common.ESCAPABLE
rePunctuation = re.compile(
r'[!"#$%&\'()*+,\-./:;<=>?@\[\]\\^_`{|}~\xA1\xA7\xAB\xB6\xB7\xBB'
r'\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3'
r'\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F'
r'\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E'
r'\u085E\u0964\u0965\u0970\u0AF0\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12'
r'\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB'
r'\u1360-\u1368\u1400\u166D\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736'
r'\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-'
r'\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F'
r'\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E'
r'\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5'
r'\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC'
r'\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E42\u3001-\u3003\u3008-\u3011'
r'\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673'
r'\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E'
r'\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0'
r'\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63'
r'\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B'
r'\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]|\uD800[\uDD00-'
r'\uDD02\uDF9F\uDFD0]|\uD801\uDD6F|\uD802[\uDC57\uDD1F\uDD3F\uDE50-\uDE58'
r'\uDE7F\uDEF0-\uDEF6\uDF39-\uDF3F\uDF99-\uDF9C]|\uD804[\uDC47-\uDC4D'
r'\uDCBB\uDCBC\uDCBE-\uDCC1\uDD40-\uDD43\uDD74\uDD75\uDDC5-\uDDC9\uDDCD'
r'\uDDDB\uDDDD-\uDDDF\uDE38-\uDE3D\uDEA9]|\uD805[\uDCC6\uDDC1-\uDDD7'
r'\uDE41-\uDE43\uDF3C-\uDF3E]|\uD809[\uDC70-\uDC74]|\uD81A[\uDE6E\uDE6F'
r'\uDEF5\uDF37-\uDF3B\uDF44]|\uD82F\uDC9F|\uD836[\uDE87-\uDE8B]'
)
reLinkTitle = re.compile(
'^(?:"(' + ESCAPED_CHAR + '|[^"\\x00])*"' +
'|' +
'\'(' + ESCAPED_CHAR + '|[^\'\\x00])*\'' +
'|' +
'\\((' + ESCAPED_CHAR + '|[^()\\x00])*\\))')
reLinkDestinationBraces = re.compile(r'^(?:<(?:[^<>\n\\\x00]|\\.)*>)')
reEscapable = re.compile('^' + common.ESCAPABLE)
reEntityHere = re.compile('^' + common.ENTITY, re.IGNORECASE)
reTicks = re.compile(r'`+')
reTicksHere = re.compile(r'^`+')
reEllipses = re.compile(r'\.\.\.')
reDash = re.compile(r'--+')
reEmailAutolink = re.compile(
r"^<([a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9]"
r"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?"
r"(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)>")
reAutolink = re.compile(
r'^<[A-Za-z][A-Za-z0-9.+-]{1,31}:[^<>\x00-\x20]*>',
re.IGNORECASE)
reSpnl = re.compile(r'^ *(?:\n *)?')
reWhitespaceChar = re.compile(r'^[ \t\n\x0b\x0c\x0d]')
reWhitespace = re.compile(r'[ \t\n\x0b\x0c\x0d]+')
reUnicodeWhitespaceChar = re.compile(r'^\s')
reFinalSpace = re.compile(r' *$')
reInitialSpace = re.compile(r'^ *')
reSpaceAtEndOfLine = re.compile(r'^ *(?:\n|$)')
reLinkLabel = re.compile(r'^\[(?:[^\\\[\]]|\\.){0,1000}\]')
# Matches a string of non-special characters.
reMain = re.compile(r'^[^\n`\[\]\\!<&*_\'"]+', re.MULTILINE)
def text(s):
node = Node('text', None)
node.literal = s
return node
def smart_dashes(chars):
en_count = 0
em_count = 0
if len(chars) % 3 == 0:
# If divisible by 3, use all em dashes
em_count = len(chars) // 3
elif len(chars) % 2 == 0:
        # If divisible by 2, use all en dashes
en_count = len(chars) // 2
elif len(chars) % 3 == 2:
        # if 2 extra dashes, use en dash for last 2;
# em dashes for rest
en_count = 1
em_count = (len(chars) - 2) // 3
else:
# Use en dashes for last 4 hyphens; em dashes for rest
en_count = 2
em_count = (len(chars) - 4) // 3
return ('\u2014' * em_count) + ('\u2013' * en_count)
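# For reference, smart_dashes maps hyphen runs as follows:
#   '--' -> en dash, '---' -> em dash, '----' -> two en dashes,
#   '-----' -> em dash + en dash, '------' -> two em dashes.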
class InlineParser(object):
"""INLINE PARSER
These are methods of an InlineParser class, defined below.
An InlineParser keeps track of a subject (a string to be
parsed) and a position in that subject.
"""
def __init__(self, options={}):
self.subject = ''
self.brackets = None
self.pos = 0
self.refmap = {}
self.options = options
def match(self, regexString):
"""
If regexString matches at current position in the subject, advance
position in subject and return the match; otherwise return None.
"""
match = re.search(regexString, self.subject[self.pos:])
if match is None:
return None
else:
self.pos += match.end()
return match.group()
def peek(self):
""" Returns the character at the current subject position, or None if
there are no more characters."""
if self.pos < len(self.subject):
return self.subject[self.pos]
else:
return None
def spnl(self):
""" Parse zero or more space characters, including at
most one newline."""
self.match(reSpnl)
return True
# All of the parsers below try to match something at the current position
# in the subject. If they succeed in matching anything, they
# push an inline matched, advancing the subject.
def parseBackticks(self, block):
""" Attempt to parse backticks, adding either a backtick code span or a
literal sequence of backticks to the 'inlines' list."""
ticks = self.match(reTicksHere)
if ticks is None:
return False
after_open_ticks = self.pos
matched = self.match(reTicks)
while matched is not None:
if matched == ticks:
node = Node('code', None)
contents = self.subject[after_open_ticks:self.pos-len(ticks)] \
.replace('\n', ' ')
if contents.lstrip(' ') and contents[0] == contents[-1] == ' ':
node.literal = contents[1:-1]
else:
node.literal = contents
block.append_child(node)
return True
matched = self.match(reTicks)
# If we got here, we didn't match a closing backtick sequence.
self.pos = after_open_ticks
block.append_child(text(ticks))
return True
def parseBackslash(self, block):
"""
Parse a backslash-escaped special character, adding either the
escaped character, a hard line break (if the backslash is followed
by a newline), or a literal backslash to the block's children.
Assumes current character is a backslash.
"""
subj = self.subject
self.pos += 1
try:
subjchar = subj[self.pos]
except IndexError:
subjchar = None
if self.peek() == '\n':
self.pos += 1
node = Node('linebreak', None)
block.append_child(node)
elif subjchar and re.search(reEscapable, subjchar):
block.append_child(text(subjchar))
self.pos += 1
else:
block.append_child(text('\\'))
return True
def parseAutolink(self, block):
"""Attempt to parse an autolink (URL or email in pointy brackets)."""
m = self.match(reEmailAutolink)
if m:
# email
dest = m[1:-1]
node = Node('link', None)
node.destination = normalize_uri('mailto:' + dest)
node.title = ''
node.append_child(text(dest))
block.append_child(node)
return True
else:
m = self.match(reAutolink)
if m:
# link
dest = m[1:-1]
node = Node('link', None)
node.destination = normalize_uri(dest)
node.title = ''
node.append_child(text(dest))
block.append_child(node)
return True
return False
def parseHtmlTag(self, block):
"""Attempt to parse a raw HTML tag."""
m = self.match(common.reHtmlTag)
if m is None:
return False
else:
node = Node('html_inline', None)
node.literal = m
block.append_child(node)
return True
def scanDelims(self, c):
"""
Scan a sequence of characters == c, and return information about
the number of delimiters and whether they are positioned such that
they can open and/or close emphasis or strong emphasis. A utility
function for strong/emph parsing.
"""
numdelims = 0
startpos = self.pos
if c == "'" or c == '"':
numdelims += 1
self.pos += 1
else:
while (self.peek() == c):
numdelims += 1
self.pos += 1
if numdelims == 0:
return None
c_before = '\n' if startpos == 0 else self.subject[startpos - 1]
c_after = self.peek()
if c_after is None:
c_after = '\n'
# Python 2 doesn't recognize '\xa0' as whitespace
after_is_whitespace = re.search(reUnicodeWhitespaceChar, c_after) or \
c_after == '\xa0'
after_is_punctuation = re.search(rePunctuation, c_after)
before_is_whitespace = re.search(
reUnicodeWhitespaceChar, c_before) or \
c_before == '\xa0'
before_is_punctuation = re.search(rePunctuation, c_before)
left_flanking = not after_is_whitespace and \
(not after_is_punctuation or
before_is_whitespace or
before_is_punctuation)
right_flanking = not before_is_whitespace and \
(not before_is_punctuation or
after_is_whitespace or
after_is_punctuation)
if c == '_':
can_open = left_flanking and \
(not right_flanking or before_is_punctuation)
can_close = right_flanking and \
(not left_flanking or after_is_punctuation)
elif c == "'" or c == '"':
can_open = left_flanking and not right_flanking
can_close = right_flanking
else:
can_open = left_flanking
can_close = right_flanking
self.pos = startpos
return {
'numdelims': numdelims,
'can_open': can_open,
'can_close': can_close,
}
def handleDelim(self, cc, block):
"""Handle a delimiter marker for emphasis or a quote."""
res = self.scanDelims(cc)
if not res:
return False
numdelims = res.get('numdelims')
startpos = self.pos
self.pos += numdelims
if cc == "'":
contents = '\u2019'
elif cc == '"':
contents = '\u201C'
else:
contents = self.subject[startpos:self.pos]
node = text(contents)
block.append_child(node)
# Add entry to stack for this opener
self.delimiters = {
'cc': cc,
'numdelims': numdelims,
'origdelims': numdelims,
'node': node,
'previous': self.delimiters,
'next': None,
'can_open': res.get('can_open'),
'can_close': res.get('can_close'),
}
if self.delimiters['previous'] is not None:
self.delimiters['previous']['next'] = self.delimiters
return True
def removeDelimiter(self, delim):
if delim.get('previous') is not None:
delim['previous']['next'] = delim.get('next')
if delim.get('next') is None:
# Top of stack
self.delimiters = delim.get('previous')
else:
delim['next']['previous'] = delim.get('previous')
@staticmethod
def removeDelimitersBetween(bottom, top):
if bottom.get('next') != top:
bottom['next'] = top
top['previous'] = bottom
def processEmphasis(self, stack_bottom):
openers_bottom = {
'_': stack_bottom,
'*': stack_bottom,
"'": stack_bottom,
'"': stack_bottom,
}
odd_match = False
use_delims = 0
# Find first closer above stack_bottom
closer = self.delimiters
while closer is not None and closer.get('previous') != stack_bottom:
closer = closer.get('previous')
# Move forward, looking for closers, and handling each
while closer is not None:
if not closer.get('can_close'):
closer = closer.get('next')
else:
# found emphasis closer. now look back for first
# matching opener:
opener = closer.get('previous')
opener_found = False
closercc = closer.get('cc')
while (opener is not None and opener != stack_bottom and
opener != openers_bottom[closercc]):
odd_match = (closer.get('can_open') or
opener.get('can_close')) and \
closer['origdelims'] % 3 != 0 and \
(opener['origdelims'] +
closer['origdelims']) % 3 == 0
if opener.get('cc') == closercc and \
opener.get('can_open') and \
not odd_match:
opener_found = True
break
opener = opener.get('previous')
old_closer = closer
if closercc == '*' or closercc == '_':
if not opener_found:
closer = closer.get('next')
else:
# Calculate actual number of delimiters used from
# closer
use_delims = 2 if (
closer['numdelims'] >= 2 and
opener['numdelims'] >= 2) else 1
opener_inl = opener.get('node')
closer_inl = closer.get('node')
# Remove used delimiters from stack elts and inlines
opener['numdelims'] -= use_delims
closer['numdelims'] -= use_delims
opener_inl.literal = opener_inl.literal[
:len(opener_inl.literal) - use_delims]
closer_inl.literal = closer_inl.literal[
:len(closer_inl.literal) - use_delims]
# Build contents for new Emph element
if use_delims == 1:
emph = Node('emph', None)
else:
emph = Node('strong', None)
tmp = opener_inl.nxt
while tmp and tmp != closer_inl:
nxt = tmp.nxt
tmp.unlink()
emph.append_child(tmp)
tmp = nxt
opener_inl.insert_after(emph)
# Remove elts between opener and closer in delimiters
# stack
self.removeDelimitersBetween(opener, closer)
# If opener has 0 delims, remove it and the inline
if opener['numdelims'] == 0:
opener_inl.unlink()
self.removeDelimiter(opener)
if closer['numdelims'] == 0:
closer_inl.unlink()
tempstack = closer['next']
self.removeDelimiter(closer)
closer = tempstack
elif closercc == "'":
closer['node'].literal = '\u2019'
if opener_found:
opener['node'].literal = '\u2018'
closer = closer['next']
elif closercc == '"':
closer['node'].literal = '\u201D'
if opener_found:
opener['node'].literal = '\u201C'
closer = closer['next']
if not opener_found and not odd_match:
# Set lower bound for future searches for openers:
# We don't do this with odd_match because a **
# that doesn't match an earlier * might turn into
# an opener, and the * might be matched by something
# else.
openers_bottom[closercc] = old_closer['previous']
if not old_closer['can_open']:
# We can remove a closer that can't be an opener,
# once we've seen there's no matching opener:
self.removeDelimiter(old_closer)
# Remove all delimiters
while self.delimiters is not None and self.delimiters != stack_bottom:
self.removeDelimiter(self.delimiters)
def parseLinkTitle(self):
"""
Attempt to parse link title (sans quotes), returning the string
or None if no match.
"""
title = self.match(reLinkTitle)
if title is None:
return None
else:
# chop off quotes from title and unescape:
return unescape_string(title[1:-1])
def parseLinkDestination(self):
"""
Attempt to parse link destination, returning the string or
None if no match.
"""
res = self.match(reLinkDestinationBraces)
if res is None:
if self.peek() == '<':
return None
# TODO handrolled parser; res should be None or the string
savepos = self.pos
openparens = 0
while True:
c = self.peek()
if c is None:
break
if c == '\\' and re.search(
reEscapable, self.subject[self.pos+1:self.pos+2]):
self.pos += 1
if self.peek() is not None:
self.pos += 1
elif c == '(':
self.pos += 1
openparens += 1
elif c == ')':
if openparens < 1:
break
else:
self.pos += 1
openparens -= 1
elif re.search(reWhitespaceChar, c):
break
else:
self.pos += 1
if self.pos == savepos and c != ')':
return None
res = self.subject[savepos:self.pos]
return normalize_uri(unescape_string(res))
else:
# chop off surrounding <..>:
return normalize_uri(unescape_string(res[1:-1]))
def parseLinkLabel(self):
"""
Attempt to parse a link label, returning number of
characters parsed.
"""
# Note: our regex will allow something of form [..\];
# we disallow it here rather than using lookahead in the regex:
m = self.match(reLinkLabel)
if m is None or len(m) > 1001:
return 0
else:
return len(m)
def parseOpenBracket(self, block):
"""
Add open bracket to delimiter stack and add a text node to
block's children.
"""
startpos = self.pos
self.pos += 1
node = text('[')
block.append_child(node)
# Add entry to stack for this opener
self.addBracket(node, startpos, False)
return True
def parseBang(self, block):
"""
        If next character is [, add a ! delimiter to the delimiter stack and
add a text node to block's children. Otherwise just add a text
node.
"""
startpos = self.pos
self.pos += 1
if self.peek() == '[':
self.pos += 1
node = text('![')
block.append_child(node)
            # Add entry to stack for this opener
self.addBracket(node, startpos + 1, True)
else:
block.append_child(text('!'))
return True
def parseCloseBracket(self, block):
"""
Try to match close bracket against an opening in the delimiter
stack. Add either a link or image, or a plain [ character,
to block's children. If there is a matching delimiter,
remove it from the delimiter stack.
"""
title = None
matched = False
self.pos += 1
startpos = self.pos
# get last [ or ![
opener = self.brackets
if opener is None:
# no matched opener, just return a literal
block.append_child(text(']'))
return True
if not opener.get('active'):
# no matched opener, just return a literal
block.append_child(text(']'))
# take opener off brackets stack
self.removeBracket()
return True
# If we got here, opener is a potential opener
is_image = opener.get('image')
# Check to see if we have a link/image
savepos = self.pos
# Inline link?
if self.peek() == '(':
self.pos += 1
self.spnl()
dest = self.parseLinkDestination()
if dest is not None and self.spnl():
# make sure there's a space before the title
if re.search(reWhitespaceChar, self.subject[self.pos-1]):
title = self.parseLinkTitle()
if self.spnl() and self.peek() == ')':
self.pos += 1
matched = True
else:
self.pos = savepos
if not matched:
# Next, see if there's a link label
beforelabel = self.pos
n = self.parseLinkLabel()
if n > 2:
reflabel = self.subject[beforelabel:beforelabel + n]
elif not opener.get('bracket_after'):
# Empty or missing second label means to use the first
# label as the reference. The reference must not
# contain a bracket. If we know there's a bracket, we
# don't even bother checking it.
reflabel = self.subject[opener.get('index'):startpos]
if n == 0:
# If shortcut reference link, rewind before spaces we skipped.
self.pos = savepos
if reflabel:
# lookup rawlabel in refmap
link = self.refmap.get(normalize_reference(reflabel))
if link:
dest = link['destination']
title = link['title']
matched = True
if matched:
node = Node('image' if is_image else 'link', None)
node.destination = dest
node.title = title or ''
tmp = opener.get('node').nxt
while tmp:
nxt = tmp.nxt
tmp.unlink()
node.append_child(tmp)
tmp = nxt
block.append_child(node)
self.processEmphasis(opener.get('previousDelimiter'))
self.removeBracket()
opener.get('node').unlink()
# We remove this bracket and processEmphasis will remove
# later delimiters.
# Now, for a link, we also deactivate earlier link openers.
# (no links in links)
if not is_image:
opener = self.brackets
while opener is not None:
if not opener.get('image'):
# deactivate this opener
opener['active'] = False
opener = opener.get('previous')
return True
else:
# no match
# remove this opener from stack
self.removeBracket()
self.pos = startpos
block.append_child(text(']'))
return True
def addBracket(self, node, index, image):
if self.brackets is not None:
            # must use the same key that parseCloseBracket checks
            self.brackets['bracket_after'] = True
self.brackets = {
'node': node,
'previous': self.brackets,
'previousDelimiter': self.delimiters,
'index': index,
'image': image,
'active': True,
}
def removeBracket(self):
self.brackets = self.brackets.get('previous')
def parseEntity(self, block):
"""Attempt to parse an entity."""
m = self.match(reEntityHere)
if m:
block.append_child(text(HTMLunescape(m)))
return True
else:
return False
def parseString(self, block):
"""
Parse a run of ordinary characters, or a single character with
a special meaning in markdown, as a plain string.
"""
m = self.match(reMain)
if m:
if self.options.get('smart'):
s = re.sub(reEllipses, '\u2026', m)
s = re.sub(reDash, lambda x: smart_dashes(x.group()), s)
block.append_child(text(s))
else:
block.append_child(text(m))
return True
else:
return False
def parseNewline(self, block):
"""
Parse a newline. If it was preceded by two spaces, return a hard
line break; otherwise a soft line break.
"""
# assume we're at a \n
self.pos += 1
lastc = block.last_child
if lastc and lastc.t == 'text' and lastc.literal[-1] == ' ':
linebreak = len(lastc.literal) >= 2 and lastc.literal[-2] == ' '
lastc.literal = re.sub(reFinalSpace, '', lastc.literal)
if linebreak:
node = Node('linebreak', None)
else:
node = Node('softbreak', None)
block.append_child(node)
else:
block.append_child(Node('softbreak', None))
# gobble leading spaces in next line
self.match(reInitialSpace)
return True
def parseReference(self, s, refmap):
"""Attempt to parse a link reference, modifying refmap."""
self.subject = s
self.pos = 0
startpos = self.pos
# label:
match_chars = self.parseLinkLabel()
if match_chars == 0 or match_chars == 2:
return 0
else:
rawlabel = self.subject[:match_chars]
# colon:
if (self.peek() == ':'):
self.pos += 1
else:
self.pos = startpos
return 0
# link url
self.spnl()
dest = self.parseLinkDestination()
if dest is None:
self.pos = startpos
return 0
beforetitle = self.pos
self.spnl()
title = None
if self.pos != beforetitle:
title = self.parseLinkTitle()
if title is None:
title = ''
# rewind before spaces
self.pos = beforetitle
# make sure we're at line end:
at_line_end = True
if self.match(reSpaceAtEndOfLine) is None:
if title == '':
at_line_end = False
else:
# the potential title we found is not at the line end,
# but it could still be a legal link reference if we
# discard the title
                title = ''
# rewind before spaces
self.pos = beforetitle
# and instead check if the link URL is at the line end
at_line_end = self.match(reSpaceAtEndOfLine) is not None
if not at_line_end:
self.pos = startpos
return 0
normlabel = normalize_reference(rawlabel)
if normlabel == '':
# label must contain non-whitespace characters
self.pos = startpos
return 0
if not refmap.get(normlabel):
refmap[normlabel] = {
'destination': dest,
'title': title
}
return (self.pos - startpos)
def parseInline(self, block):
"""
Parse the next inline element in subject, advancing subject
position.
On success, add the result to block's children and return True.
On failure, return False.
"""
res = False
c = self.peek()
if c is None:
return False
if c == '\n':
res = self.parseNewline(block)
elif c == '\\':
res = self.parseBackslash(block)
elif c == '`':
res = self.parseBackticks(block)
elif c == '*' or c == '_':
res = self.handleDelim(c, block)
elif c == "'" or c == '"':
res = self.options.get('smart') and self.handleDelim(c, block)
elif c == '[':
res = self.parseOpenBracket(block)
elif c == '!':
res = self.parseBang(block)
elif c == ']':
res = self.parseCloseBracket(block)
elif c == '<':
res = self.parseAutolink(block) or self.parseHtmlTag(block)
elif c == '&':
res = self.parseEntity(block)
else:
res = self.parseString(block)
if not res:
self.pos += 1
block.append_child(text(c))
return True
def parseInlines(self, block):
"""
Parse string content in block into inline children,
using refmap to resolve references.
"""
self.subject = block.string_content.strip()
self.pos = 0
self.delimiters = None
self.brackets = None
while (self.parseInline(block)):
pass
# allow raw string to be garbage collected
block.string_content = None
self.processEmphasis(None)
parse = parseInlines
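The 'smart' handling in parseString and handleDelim is easiest to see end to end; a sketch, assuming the package imports as commonmark:

import commonmark

parser = commonmark.Parser(options={'smart': True})
renderer = commonmark.HtmlRenderer()
print(renderer.render(parser.parse('"quotes" -- and --- dashes...')))
# -> <p>“quotes” – and — dashes…</p>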

libs/commonmark/main.py Normal file
@@ -0,0 +1,41 @@
# 2014 - Bibek Kafle & Roland Shoemaker
# 2015-2017 - Nikolas Nyby
# Port of @jgm's commonmark.js implementation of the CommonMark spec.
# Basic usage:
#
# import commonmark
# parser = commonmark.Parser()
# renderer = commonmark.HtmlRenderer()
# print(renderer.render(parser.parse('Hello *world*')))
from __future__ import absolute_import, unicode_literals
from commonmark.blocks import Parser
from commonmark.dump import dumpAST, dumpJSON
from commonmark.render.html import HtmlRenderer
from commonmark.render.rst import ReStructuredTextRenderer
def commonmark(text, format="html"):
"""Render CommonMark into HTML, JSON or AST
Optional keyword arguments:
format: 'html' (default), 'json' or 'ast'
>>> commonmark("*hello!*")
'<p><em>hello</em></p>\\n'
"""
parser = Parser()
ast = parser.parse(text)
if format not in ["html", "json", "ast", "rst"]:
        raise ValueError("format must be 'html', 'json', 'ast' or 'rst'")
if format == "html":
renderer = HtmlRenderer()
return renderer.render(ast)
if format == "json":
return dumpJSON(ast)
if format == "ast":
return dumpAST(ast)
if format == "rst":
renderer = ReStructuredTextRenderer()
return renderer.render(ast)
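A short hedged example of the non-default formats (assuming the package re-exports this helper as commonmark.commonmark; exact whitespace in the output may differ):

from commonmark import commonmark

print(commonmark('*hello*', format='json'))  # JSON dump of the AST
print(commonmark('*hello*', format='rst'))   # reStructuredText output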

libs/commonmark/node.py Normal file
@@ -0,0 +1,179 @@
from __future__ import unicode_literals
import re
reContainer = re.compile(
r'(document|block_quote|list|item|paragraph|'
r'heading|emph|strong|link|image|'
r'custom_inline|custom_block)')
def is_container(node):
return (re.search(reContainer, node.t) is not None)
class NodeWalker(object):
def __init__(self, root):
self.current = root
self.root = root
self.entering = True
def __next__(self):
cur = self.current
entering = self.entering
if cur is None:
raise StopIteration
container = is_container(cur)
if entering and container:
if cur.first_child:
self.current = cur.first_child
self.entering = True
else:
# stay on node but exit
self.entering = False
elif cur == self.root:
self.current = None
elif cur.nxt is None:
self.current = cur.parent
self.entering = False
else:
self.current = cur.nxt
self.entering = True
return cur, entering
next = __next__
def __iter__(self):
return self
def nxt(self):
""" for backwards compatibility """
try:
cur, entering = next(self)
return {
'entering': entering,
'node': cur,
}
except StopIteration:
return None
def resume_at(self, node, entering):
self.current = node
self.entering = (entering is True)
class Node(object):
def __init__(self, node_type, sourcepos):
self.t = node_type
self.parent = None
self.first_child = None
self.last_child = None
self.prv = None
self.nxt = None
self.sourcepos = sourcepos
self.last_line_blank = False
self.last_line_checked = False
self.is_open = True
self.string_content = ''
self.literal = None
self.list_data = {}
self.info = None
self.destination = None
self.title = None
self.is_fenced = False
self.fence_char = None
self.fence_length = 0
self.fence_offset = None
self.level = None
self.on_enter = None
self.on_exit = None
def __repr__(self):
return "Node {} [{}]".format(self.t, self.literal)
def pretty(self):
from pprint import pprint
pprint(self.__dict__)
def normalize(self):
prev = None
for curr, _ in self.walker():
if prev is None:
prev = curr
continue
if prev.t == 'text' and curr.t == 'text':
prev.literal += curr.literal
curr.unlink()
else:
prev = curr
def is_container(self):
return is_container(self)
def append_child(self, child):
child.unlink()
child.parent = self
if self.last_child:
self.last_child.nxt = child
child.prv = self.last_child
self.last_child = child
else:
self.first_child = child
self.last_child = child
def prepend_child(self, child):
child.unlink()
child.parent = self
if self.first_child:
self.first_child.prv = child
child.nxt = self.first_child
self.first_child = child
else:
self.first_child = child
self.last_child = child
def unlink(self):
if self.prv:
self.prv.nxt = self.nxt
elif self.parent:
self.parent.first_child = self.nxt
if self.nxt:
self.nxt.prv = self.prv
elif self.parent:
self.parent.last_child = self.prv
self.parent = None
self.nxt = None
self.prv = None
def insert_after(self, sibling):
sibling.unlink()
sibling.nxt = self.nxt
if sibling.nxt:
sibling.nxt.prv = sibling
sibling.prv = self
self.nxt = sibling
sibling.parent = self.parent
if not sibling.nxt:
sibling.parent.last_child = sibling
def insert_before(self, sibling):
sibling.unlink()
sibling.prv = self.prv
if sibling.prv:
sibling.prv.nxt = sibling
sibling.nxt = self
self.prv = sibling
sibling.parent = self.parent
if not sibling.prv:
sibling.parent.first_child = sibling
def walker(self):
return NodeWalker(self)
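A tiny sketch of the Node/NodeWalker API defined above; note that non-container nodes like 'text' produce a single entering event:

from commonmark.node import Node

doc = Node('document', [[1, 1], [0, 0]])
para = Node('paragraph', None)
doc.append_child(para)
txt = Node('text', None)
txt.literal = 'hi'
para.append_child(txt)

for node, entering in doc.walker():
    print(node.t, entering)
# document True, paragraph True, text True, paragraph False, document False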

libs/commonmark/normalize_reference.py Normal file
@@ -0,0 +1,165 @@
"""Case-folding and whitespace normalization"""
# Unicode Case Folding table has been derived from the following work:
#
# CaseFolding-12.0.0.txt
# Date: 2019-01-22, 08:18:22 GMT
# (c) 2019 Unicode(R) Inc.
# Unicode and the Unicode Logo are registered trademarks
# of Unicode, Inc. in the U.S. and other countries.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
# For documentation, see http://www.unicode.org/reports/tr44/
import re
import sys
from builtins import str, chr
__all__ = ["normalize_reference"]
if sys.version_info < (3,) and sys.maxunicode <= 0xffff:
# shim for Python 2.x UCS2 build
_unichr = chr
def chr(cdp):
if 0x10000 <= cdp < 0x110000:
cdp -= 0x10000
return (_unichr(0xd800 | (cdp >> 10)) +
_unichr(0xdc00 | (cdp & 0x3ff)))
return _unichr(cdp)
def _parse_table(tbl):
xlat = {}
cur_i, cur_j = -1, 0
for entry in tbl.split(';'):
arr = entry.split(',')
info = [int(x, 36) if x else 0 for x in arr[0].split(':')]
arr = [int(x, 36) for x in arr[1:]]
assert not any(x in xlat for x in arr)
sfx = ''.join(map(chr, arr))
streak, stride = 0, 1
if len(info) == 2:
fdt, delta = info
elif len(info) == 3:
fdt, streak, delta = info
else:
fdt, streak, delta, stride = info
assert streak >= 0 and stride >= 1
cur_i += fdt + 1
cur_j -= delta
assert cur_j != 0
i = cur_i
last = cur_i + streak
while i <= last:
# uniqueness and idempotency
assert i not in xlat and i + cur_j not in xlat
assert i not in arr
xlat[i] = chr(i + cur_j) + sfx
i += stride
return xlat
XLAT = _parse_table(
# ===== Start of Unicode Case Folding table =====
'1t:p:-w;37:-kn;a:m:kn;n:6:;6:3w,37;w:1a:-31:2;1b:5k,lj;1:4:-5k:2;6:e::'
'2;f:-aa,32;:18:aa:2;19:3e;:4:-3e:2;5:7h;1:-da;:2:5t:2;3:-5p;:5p;1:1:-5'
'o;1:5o;2:-26;:-3f;:-1;:5m;1:-5o;:-2;1:-4;:2;:5s;3:-5u;:-2;1:-1;:4:5x:2'
';5:-61;:61;1:-61;2:61;1:-61;:61;1:1:-60;1:2:60:2;3:-62;:4:62:4;b:-1;:1'
';1:-1;:1;1:-1;:g:1:2;i:g::2;h:av,lo;:-aw;:2:1:2;3:2q;:-15;:12:-1l:2;13'
':3n;1:g:-3n:2;n:-8bu;:8bu;1:4k;:-8gb;2:8br;1:5g;:-7c;:-2;:8:1y:2;72:-3'
'7;16:2:37:2;5:;8:-37;6:26;1:2:1;3:-r;1:1:1;1:m,lk,ld;:g:9;h:8:;c:b,lk,'
'ld;h:k;c:-7;:12;:-5;3:-a;:7;1:m:-n:2;n:1j;:-6;2:c;:4;1:-1t;1:8;:-8;2:2'
':3n;2:f:-5u;f:v:1c;27:w:v:2;15:1g::2;1h:-e;:c:e:2;e:2m::2;2o:11:-1b;2d'
':2a,136;26w:11:-5mq;12:6::6;mo:5:5m0;1on:4sm;:-1;:-9;:1:-2;1:1;:-7;:-o'
';:-vzb;7:16:tj7;18:2:;8y:44:-2bl:2;45:5yn,mp;:-b,lk;:-2,lm;:-1,lm;:p,j'
'i;:-5xb;2:5wx,37;1:2m:-5yk:2;2v:7:9;f:5:;f:7:;f:7:;f:5:;7:5fn,lv;1:2,l'
'v,lc;1:2,lv,ld;1:2,lv,n6;2:6:-5ft:2;e:7:;n:7:3c,qh;7:7:8,qh;7:7:-o,qh;'
'7:7:8,qh;7:7:-1k,qh;7:7:8,qh;9:-6,qh;:5hc,qh;:6,qh;1:-3,n6;:1,n6,qh;:1'
':-5j2;1:1:1u;1:5hd,qh;1:-6;3:-5h3,qh;:5ha,qh;:a,qh;1:-7,n6;:1,n6,qh;:3'
':-5h6;3:5hb,qh;5:4,lk,lc;:1,lk,ld;2:3,n6;:1,lk,n6;:1:-5jq;1:1:2k;7:5h5'
',lk,lc;:1,lk,ld;:5,lv;1:-2,n6;:1,lk,n6;:1:-5ju;1:1:2w;1:-2x;5:33,qh;:5'
'h0,qh;:-4,qh;1:7,n6;:1,n6,qh;:1:-5gu;1:1:-2;1:5h1,qh;89:8a;3:o2;:-3d;6'
':-6ea;19:f:c;y:f;mq:p:-p;1ft:1a:-m;2n:1b;1:8ag;:-5ch;:5c1;2:4:-8a0:2;5'
':8bh;:-v;:y;:-1;1:3:-8bj:3;b:1:8cg;1:2q:-8cg:2;2y:2::2;6:nym::nym;nyn:'
'16::2;1p:q::2;4h:c::2;f:1o::2;1y:2::2;3:r9h;:8:-r9h:2;c:;1:wmh;2:2:-wm'
'h:2;5:i::2;j:wn9;:b;:-4;:-a;:3;1:-1e;:o;:-l;:-xbp;:a:pr:2;d:;1:1d;:wlv'
';:-5cb;q1:27:2oo;fpr:jii,2u;:1,2x;:1,30;:1,2u,2x;:1,2u,30;:-c,38;:1,38'
';c:-z8,12u;:1,12d;:1,12j;:-9,12u;:b,12l;sp:p:-1cjn;ym:13:-8;4v:z:;1jj:'
'1e:-o;2e7:v:w;gwv:v:;o8v:x:-2'
# ===== End of Unicode Case Folding table =====
)
def _check_native(tbl):
"""
Determine if Python's own native implementation
subsumes the supplied case folding table
"""
try:
for i in tbl:
stv = chr(i)
if stv.casefold() == stv:
return False
except AttributeError:
return False
return True
# Hoist version check out of function for performance
SPACE_RE = re.compile(r'[ \t\r\n]+')
if _check_native(XLAT):
def normalize_reference(string):
"""
Normalize reference label: collapse internal whitespace
to single space, remove leading/trailing whitespace, case fold.
"""
return SPACE_RE.sub(' ', string[1:-1].strip()).casefold()
elif sys.version_info >= (3,) or sys.maxunicode > 0xffff:
def normalize_reference(string):
"""
Normalize reference label: collapse internal whitespace
to single space, remove leading/trailing whitespace, case fold.
"""
return SPACE_RE.sub(' ', string[1:-1].strip()).translate(XLAT)
else:
def _get_smp_regex():
xls = sorted(x - 0x10000 for x in XLAT if x >= 0x10000)
xls.append(-1)
fmt, (dsh, opn, pip, cse) = str('\\u%04x'), str('-[|]')
rga, srk, erk = [str(r'[ \t\r\n]+')], 0, -2
for k in xls:
new_hir = (erk ^ k) >> 10 != 0
if new_hir or erk + 1 != k:
if erk >= 0 and srk != erk:
if srk + 1 != erk:
rga.append(dsh)
rga.append(fmt % (0xdc00 + (erk & 0x3ff)))
if new_hir:
if erk >= 0:
rga.append(cse)
if k < 0:
break
rga.append(pip)
rga.append(fmt % (0xd800 + (k >> 10)))
rga.append(opn)
srk = k
rga.append(fmt % (0xdc00 + (srk & 0x3ff)))
erk = k
return re.compile(str().join(rga))
def _subst_handler(matchobj):
src = matchobj.group(0)
hiv = ord(src[0])
if hiv < 0xd800:
return ' '
return XLAT[0x10000 + ((hiv & 0x3ff) << 10) | (ord(src[1]) & 0x3ff)]
SMP_RE = _get_smp_regex()
def normalize_reference(string):
"""
Normalize reference label: collapse internal whitespace
to single space, remove leading/trailing whitespace, case fold.
"""
return SMP_RE.sub(_subst_handler, string[1:-1].strip()).translate(XLAT)
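Note that normalize_reference expects the label with its surrounding brackets still attached (it slices off the first and last character); a quick check:

from commonmark.normalize_reference import normalize_reference

print(normalize_reference('[ Foo\t BAR ]'))  # foo bar
print(normalize_reference('[Straße]'))       # strasse (case folding, not lowercasing)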

libs/commonmark/render/html.py Normal file
@@ -0,0 +1,228 @@
from __future__ import unicode_literals
import re
from builtins import str
from commonmark.common import escape_xml
from commonmark.render.renderer import Renderer
reUnsafeProtocol = re.compile(
r'^javascript:|vbscript:|file:|data:', re.IGNORECASE)
reSafeDataProtocol = re.compile(
r'^data:image\/(?:png|gif|jpeg|webp)', re.IGNORECASE)
def potentially_unsafe(url):
return re.search(reUnsafeProtocol, url) and \
(not re.search(reSafeDataProtocol, url))
class HtmlRenderer(Renderer):
def __init__(self, options={}):
# by default, soft breaks are rendered as newlines in HTML
options['softbreak'] = options.get('softbreak') or '\n'
# set to "<br />" to make them hard breaks
# set to " " if you want to ignore line wrapping in source
self.disable_tags = 0
self.last_out = '\n'
self.options = options
def escape(self, text):
return escape_xml(text)
def tag(self, name, attrs=None, selfclosing=None):
"""Helper function to produce an HTML tag."""
if self.disable_tags > 0:
return
self.buf += '<' + name
if attrs and len(attrs) > 0:
for attrib in attrs:
self.buf += ' ' + attrib[0] + '="' + attrib[1] + '"'
if selfclosing:
self.buf += ' /'
self.buf += '>'
self.last_out = '>'
# Node methods #
def text(self, node, entering=None):
self.out(node.literal)
def softbreak(self, node=None, entering=None):
self.lit(self.options['softbreak'])
def linebreak(self, node=None, entering=None):
self.tag('br', [], True)
self.cr()
def link(self, node, entering):
attrs = self.attrs(node)
if entering:
if not (self.options.get('safe') and
potentially_unsafe(node.destination)):
attrs.append(['href', self.escape(node.destination)])
if node.title:
attrs.append(['title', self.escape(node.title)])
self.tag('a', attrs)
else:
self.tag('/a')
def image(self, node, entering):
if entering:
if self.disable_tags == 0:
if self.options.get('safe') and \
potentially_unsafe(node.destination):
self.lit('<img src="" alt="')
else:
self.lit('<img src="' +
self.escape(node.destination) +
'" alt="')
self.disable_tags += 1
else:
self.disable_tags -= 1
if self.disable_tags == 0:
if node.title:
self.lit('" title="' + self.escape(node.title))
self.lit('" />')
def emph(self, node, entering):
self.tag('em' if entering else '/em')
def strong(self, node, entering):
self.tag('strong' if entering else '/strong')
def paragraph(self, node, entering):
grandparent = node.parent.parent
attrs = self.attrs(node)
if grandparent is not None and grandparent.t == 'list':
if grandparent.list_data['tight']:
return
if entering:
self.cr()
self.tag('p', attrs)
else:
self.tag('/p')
self.cr()
def heading(self, node, entering):
tagname = 'h' + str(node.level)
attrs = self.attrs(node)
if entering:
self.cr()
self.tag(tagname, attrs)
else:
self.tag('/' + tagname)
self.cr()
def code(self, node, entering):
self.tag('code')
self.out(node.literal)
self.tag('/code')
def code_block(self, node, entering):
info_words = node.info.split() if node.info else []
attrs = self.attrs(node)
if len(info_words) > 0 and len(info_words[0]) > 0:
attrs.append(['class', 'language-' +
self.escape(info_words[0])])
self.cr()
self.tag('pre')
self.tag('code', attrs)
self.out(node.literal)
self.tag('/code')
self.tag('/pre')
self.cr()
def thematic_break(self, node, entering):
attrs = self.attrs(node)
self.cr()
self.tag('hr', attrs, True)
self.cr()
def block_quote(self, node, entering):
attrs = self.attrs(node)
if entering:
self.cr()
self.tag('blockquote', attrs)
self.cr()
else:
self.cr()
self.tag('/blockquote')
self.cr()
def list(self, node, entering):
tagname = 'ul' if node.list_data['type'] == 'bullet' else 'ol'
attrs = self.attrs(node)
if entering:
start = node.list_data['start']
if start is not None and start != 1:
attrs.append(['start', str(start)])
self.cr()
self.tag(tagname, attrs)
self.cr()
else:
self.cr()
self.tag('/' + tagname)
self.cr()
def item(self, node, entering):
attrs = self.attrs(node)
if entering:
self.tag('li', attrs)
else:
self.tag('/li')
self.cr()
def html_inline(self, node, entering):
if self.options.get('safe'):
self.lit('<!-- raw HTML omitted -->')
else:
self.lit(node.literal)
def html_block(self, node, entering):
self.cr()
if self.options.get('safe'):
self.lit('<!-- raw HTML omitted -->')
else:
self.lit(node.literal)
self.cr()
def custom_inline(self, node, entering):
if entering and node.on_enter:
self.lit(node.on_enter)
elif (not entering) and node.on_exit:
self.lit(node.on_exit)
def custom_block(self, node, entering):
self.cr()
if entering and node.on_enter:
self.lit(node.on_enter)
elif (not entering) and node.on_exit:
self.lit(node.on_exit)
self.cr()
# Helper methods #
def out(self, s):
self.lit(self.escape(s))
def attrs(self, node):
att = []
if self.options.get('sourcepos'):
pos = node.sourcepos
if pos:
att.append(['data-sourcepos', str(pos[0][0]) + ':' +
str(pos[0][1]) + '-' + str(pos[1][0]) + ':' +
str(pos[1][1])])
return att
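For orientation, this renderer is driven the same way as in the package-level tests further down: parse to an AST, then render. A minimal sketch:
import commonmark

parser = commonmark.Parser()
ast = parser.parse('Hello **world**')
html = commonmark.HtmlRenderer().render(ast)
# html == '<p>Hello <strong>world</strong></p>\n'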


@ -0,0 +1,43 @@
from __future__ import unicode_literals
class Renderer(object):
def render(self, ast):
"""Walks the AST and calls member methods for each Node type.
@param ast {Node} The root of the abstract syntax tree.
"""
walker = ast.walker()
self.buf = ''
self.last_out = '\n'
event = walker.nxt()
while event is not None:
type_ = event['node'].t
if hasattr(self, type_):
getattr(self, type_)(event['node'], event['entering'])
event = walker.nxt()
return self.buf
def lit(self, s):
"""Concatenate a literal string to the buffer.
@param str {String} The string to concatenate.
"""
self.buf += s
self.last_out = s
def cr(self):
if self.last_out != '\n':
self.lit('\n')
def out(self, s):
"""Concatenate a string to the buffer possibly escaping the content.
Concrete renderer implementations should override this method.
@param str {String} The string to concatenate.
"""
self.lit(s)
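Because render() dispatches on node.t by method name, a subclass only needs to define methods for the node types it cares about. A minimal sketch of a custom renderer under that assumption (class name hypothetical):
from commonmark.render.renderer import Renderer

class PlainTextRenderer(Renderer):
    # method names match node types ('text', 'paragraph', ...)
    def text(self, node, entering):
        self.lit(node.literal)

    def paragraph(self, node, entering):
        if not entering:
            self.cr()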


@ -0,0 +1,159 @@
from __future__ import unicode_literals
from commonmark.render.renderer import Renderer
class ReStructuredTextRenderer(Renderer):
"""
Render reStructuredText from Markdown
Example:
.. code:: python
import commonmark
parser = commonmark.Parser()
ast = parser.parse('Hello `inline code` example')
renderer = commonmark.ReStructuredTextRenderer()
rst = renderer.render(ast)
print(rst) # Hello ``inline code`` example
"""
def __init__(self, indent_char=' '):
self.indent_char = indent_char
self.indent_length = 0
def lit(self, s):
if s == '\n':
indent = '' # Avoid whitespace if we're just adding a newline
elif self.last_out != '\n':
indent = '' # Don't indent if we're in the middle of a line
else:
indent = self.indent_char * self.indent_length
return super(ReStructuredTextRenderer, self).lit(indent + s)
def cr(self):
self.lit('\n')
def indent_lines(self, literal, indent_length=4):
indent = self.indent_char * indent_length
new_lines = []
for line in literal.splitlines():
new_lines.append(indent + line)
return '\n'.join(new_lines)
# Nodes
def document(self, node, entering):
pass
def softbreak(self, node, entering):
self.cr()
def linebreak(self, node, entering):
self.cr()
self.cr()
def text(self, node, entering):
self.out(node.literal)
def emph(self, node, entering):
self.out('*')
def strong(self, node, entering):
self.out('**')
def paragraph(self, node, entering):
if node.parent.t == 'item':
pass
else:
self.cr()
def link(self, node, entering):
if entering:
self.out('`')
else:
self.out(' <%s>`_' % node.destination)
def image(self, node, entering):
directive = '.. image:: ' + node.destination
if entering:
self.out(directive)
self.cr()
self.indent_length += 4
self.out(':alt: ')
else:
self.indent_length -= 4
def code(self, node, entering):
self.out('``')
self.out(node.literal)
self.out('``')
def code_block(self, node, entering):
directive = '.. code::'
language_name = None
info_words = node.info.split() if node.info else []
if len(info_words) > 0 and len(info_words[0]) > 0:
language_name = info_words[0]
if language_name:
directive += ' ' + language_name
self.cr()
self.out(directive)
self.cr()
self.cr()
self.out(self.indent_lines(node.literal))
self.cr()
def list(self, node, entering):
if entering:
self.cr()
def item(self, node, entering):
tagname = '*' if node.list_data['type'] == 'bullet' else '#.'
if entering:
self.out(tagname + ' ')
else:
self.cr()
def block_quote(self, node, entering):
if entering:
self.indent_length += 4
else:
self.indent_length -= 4
def heading(self, node, entering):
heading_chars = [
'#',
'*',
'=',
'-',
'^',
'"'
]
try:
heading_char = heading_chars[node.level-1]
except IndexError:
# Default to the last level if we're in too deep
heading_char = heading_chars[-1]
heading_length = len(node.first_child.literal)
banner = heading_char * heading_length
if entering:
self.cr()
else:
self.cr()
self.out(banner)
self.cr()


@ -0,0 +1,172 @@
import unittest
import commonmark
class TestCommonmark(unittest.TestCase):
def setUp(self):
self.parser = commonmark.Parser()
self.renderer = commonmark.ReStructuredTextRenderer()
def render_rst(self, test_str):
ast = self.parser.parse(test_str)
rst = self.renderer.render(ast)
return rst
def assertEqualRender(self, src_markdown, expected_rst):
rendered_rst = self.render_rst(src_markdown)
self.assertEqual(rendered_rst, expected_rst)
def test_strong(self):
src_markdown = 'Hello **Strong**'
expected_rst = '\nHello **Strong**\n'
self.assertEqualRender(src_markdown, expected_rst)
def test_emphasis(self):
src_markdown = 'Hello *Emphasis*'
expected_rst = '\nHello *Emphasis*\n'
self.assertEqualRender(src_markdown, expected_rst)
def test_paragraph(self):
src_markdown = 'Hello paragraph'
expected_rst = '\nHello paragraph\n'
self.assertEqualRender(src_markdown, expected_rst)
def test_link(self):
src_markdown = '[Link](http://example.com)'
expected_rst = '\n`Link <http://example.com>`_\n'
self.assertEqualRender(src_markdown, expected_rst)
def test_image(self):
src_markdown = '![Image](http://placekitten.com/100/100)'
expected_rst = """
.. image:: http://placekitten.com/100/100
:alt: Image
"""
self.assertEqualRender(src_markdown, expected_rst)
def test_code(self):
src_markdown = 'Test `inline code` with backticks'
expected_rst = '\nTest ``inline code`` with backticks\n'
self.assertEqualRender(src_markdown, expected_rst)
def test_code_block(self):
src_markdown = """
```python
# code block
print '3 backticks or'
print 'indent 4 spaces'
```
"""
expected_rst = """
.. code:: python
# code block
print '3 backticks or'
print 'indent 4 spaces'
"""
self.assertEqualRender(src_markdown, expected_rst)
def test_unordered_list(self):
src_markdown = """
This is a list:
* List item
* List item
* List item
"""
expected_rst = """
This is a list:
* List item
* List item
* List item
"""
self.assertEqualRender(src_markdown, expected_rst)
def test_ordered_list(self):
src_markdown = """
This is an ordered list:
1. One
2. Two
3. Three
"""
expected_rst = """
This is an ordered list:
#. One
#. Two
#. Three
"""
self.assertEqualRender(src_markdown, expected_rst)
def test_block_quote(self):
src_markdown = """
Before the blockquote:
> The blockquote
After the blockquote
"""
expected_rst = """
Before the blockquote:
The blockquote
After the blockquote
"""
self.assertEqualRender(src_markdown, expected_rst)
def test_heading(self):
src_markdown = '''
# Heading 1
## Heading 2
### Heading 3
#### Heading 4
##### Heading 5
###### Heading 6
'''
expected_rst = '''
Heading 1
#########
Heading 2
*********
Heading 3
=========
Heading 4
---------
Heading 5
^^^^^^^^^
Heading 6
"""""""""
'''
self.assertEqualRender(src_markdown, expected_rst)
def test_multiple_paragraphs(self):
src_markdown = '''
Start of first paragraph that
continues on a new line
This is the second paragraph
'''
expected_rst = '''
Start of first paragraph that
continues on a new line
This is the second paragraph
'''
self.assertEqualRender(src_markdown, expected_rst)
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,242 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import re
import timeit
import codecs
import argparse
import sys
from builtins import str
from commonmark.render.html import HtmlRenderer
from commonmark.main import Parser, dumpAST
class colors(object):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def trace_calls(frame, event, arg):
co = frame.f_code
func_name = co.co_name
if func_name == "write":
return
line_no = frame.f_lineno
filename = co.co_filename
if event == "call" and not re.match("__", func_name) and \
re.search("CommonMark.py", filename) \
and func_name != "dumpAST":
print("-> " + frame.f_back.f_code.co_name +
" at " + str(frame.f_back.f_lineno) +
" called " + func_name + " at " + str(line_no) +
" in " + filename)
return trace_calls
def main():
parser = argparse.ArgumentParser(
description="script to run the CommonMark specification tests " +
"against the CommonMark.py parser")
parser.add_argument(
'-t',
help="Single test to run or comma separated list " +
"of tests (-t 10 or -t 10,11,12,13)")
parser.add_argument(
'-p',
action="store_true",
help="Print passed test information")
parser.add_argument(
'-f',
action="store_true",
help="Print failed tests (during -np...)")
parser.add_argument(
'-i',
action="store_true",
help="Interactive Markdown input mode")
parser.add_argument(
'-d',
action="store_true",
help="Debug, trace calls")
parser.add_argument(
'-np',
action="store_true",
help="Only print section header, tick, or cross")
parser.add_argument(
'-s',
action="store_true",
help="Print percent of tests passed by category")
args = parser.parse_args()
if args.d:
sys.settrace(trace_calls)
renderer = HtmlRenderer()
parser = Parser()
f = codecs.open("spec.txt", encoding="utf-8")
datalist = []
for line in f:
datalist.append(line)
data = "".join(datalist)
passed = 0
failed = 0
catStats = {}
examples = []
example_number = 0
current_section = ""
tabChar = '\u2192'
spaceChar = '\u2423'
nbspChar = '\u00A0'
def showSpaces(t):
t = re.sub("\\t", tabChar, t)
t = re.sub(" ", spaceChar, t)
t = re.sub(nbspChar, spaceChar, t)
return t
t = re.sub("\r\n", "\n", data)
tests = re.sub(
re.compile("^<!-- END TESTS -->(.|[\n])*", flags=re.M), '', t)
testMatch = re.findall(
re.compile(
r'^`{32} example\n'
r'([\s\S]*?)^\.\n([\s\S]*?)'
r'^`{32}$'
r'|^#{1,6} *(.*)$',
re.M),
tests)
for match in testMatch:
if not match[2] == "":
current_section = match[2]
else:
example_number += 1
examples.append({
'markdown': match[0],
'html': match[1],
'section': current_section,
'number': example_number})
current_section = ""
startTime = timeit.default_timer()
if args.i:
print(
colors.OKGREEN +
"(To end input of Markdown block enter 'end' on " +
"it's own line, to quit enter 'quit')" +
colors.ENDC)
while True:
s = ""
while True:
if sys.version_info >= (3, 0):
inp = input(colors.OKBLUE + 'Markdown: ' + colors.ENDC)
else:
inp = raw_input(colors.OKBLUE + 'Markdown: ' + colors.ENDC) # noqa
if not inp == "end" and inp != "quit":
s += inp + "\n"
elif inp == "end":
s = s[:-1]
break
elif inp == "quit":
print(colors.HEADER+"bye!"+colors.ENDC)
exit(0)
ast = parser.parse(s)
html = renderer.render(ast)
print(colors.WARNING+"="*10+"AST====="+colors.ENDC)
dumpAST(ast)
print(colors.WARNING+"="*10+"HTML===="+colors.ENDC)
print(html)
# run only the subset of tests selected with -t
if args.t:
tests = args.t.split(",")
choice_examples = []
for t in tests:
if not t == "" and len(examples) > int(t):
choice_examples.append(examples[int(t)-1])
examples = choice_examples
# all tests
for i, example in enumerate(examples): # [0,examples[0]]
if not example['section'] == "" and \
not current_section == example['section']:
print('\n' + colors.HEADER + '[' + example['section'] + ']' +
colors.ENDC + ' ', end='')
current_section = example['section']
catStats.update({current_section: [0, 0, 0]})
catStats[current_section][2] += 1
if args.d:
print(colors.HEADER+"[Parsing]"+colors.ENDC)
ast = parser.parse(re.sub(tabChar, "\t", example['markdown']))
if args.d:
print(colors.HEADER+"[Rendering]"+colors.ENDC)
actual = renderer.render(ast)
if re.sub('\t', tabChar, actual) == example['html']:
passed += 1
catStats[current_section][0] += 1
if not args.f:
print(colors.OKGREEN + '✓' + colors.ENDC, end='')
if args.d:
dumpAST(ast)
if args.p or args.d and not args.np:
print(
colors.OKBLUE +
"=== markdown ===============\n" +
colors.ENDC + showSpaces(example['markdown']) +
colors.OKBLUE +
"\n=== expected ===============\n" +
colors.ENDC + showSpaces(example['html']) +
colors.OKBLUE +
"\n=== got ====================\n" +
colors.ENDC + showSpaces(actual))
else:
failed += 1
catStats[current_section][1] += 1
if args.t:
print("Test #" + str(args.t.split(",")[i]), end='')
else:
print("Test #" + str(i+1), end='')
print(' ' + colors.FAIL + '✗' + colors.ENDC)
if args.d:
dumpAST(ast)
if not args.np or args.f:
print(
colors.WARNING +
"=== markdown ===============\n" +
colors.ENDC + showSpaces(example['markdown']) +
colors.WARNING +
"\n=== expected ===============\n" +
colors.ENDC + showSpaces(example['html']) +
colors.WARNING +
"\n=== got ====================\n" +
colors.ENDC + showSpaces(actual))
print('\n' + str(passed) + ' tests passed, ' + str(failed) + ' failed')
endTime = timeit.default_timer()
runTime = endTime - startTime
if args.s:
for i in catStats.keys():
per = catStats[i][0]/catStats[i][2]
print(colors.HEADER + "[" + i + "]" + colors.ENDC +
"\t" + str(per*100) + "% Passed")
print("runtime: " + str(runTime) + "s")
if (failed > 0):
sys.exit(1)
if __name__ == '__main__':
main()


@ -0,0 +1,157 @@
from __future__ import unicode_literals
import unittest
try:
from hypothesis import given, example
except ImportError:
# Mock out hypothesis stuff for python 2.6
def given(a):
def func(b):
return
return func
example = given
try:
from hypothesis.strategies import text
except ImportError:
def text():
pass
import commonmark
from commonmark.blocks import Parser
from commonmark.render.html import HtmlRenderer
from commonmark.inlines import InlineParser
from commonmark.node import NodeWalker, Node
class TestCommonmark(unittest.TestCase):
def test_output(self):
s = commonmark.commonmark('*hello!*')
self.assertEqual(s, '<p><em>hello!</em></p>\n')
def test_unicode(self):
s = commonmark.commonmark('<div>\u2020</div>\n')
self.assertEqual(s, '<div>\u2020</div>\n',
'Unicode works in an HTML block.')
commonmark.commonmark('* unicode: \u2020')
commonmark.commonmark('# unicode: \u2020')
commonmark.commonmark('```\n# unicode: \u2020\n```')
def test_null_string_bug(self):
s = commonmark.commonmark('> sometext\n>\n\n')
self.assertEqual(
s,
'<blockquote>\n<pre><code>sometext\n</code></pre>'
'\n</blockquote>\n')
def test_normalize_contracts_text_nodes(self):
md = '_a'
ast = Parser().parse(md)
def assert_text_literals(text_literals):
walker = ast.walker()
document, _ = walker.next()
self.assertEqual(document.t, 'document')
paragraph, _ = walker.next()
self.assertEqual(paragraph.t, 'paragraph')
for literal in text_literals:
text, _ = walker.next()
self.assertEqual(text.t, 'text')
self.assertEqual(text.literal, literal)
paragraph, _ = walker.next()
self.assertEqual(paragraph.t, 'paragraph')
assert_text_literals(['_', 'a'])
ast.normalize()
# assert text nodes are contracted
assert_text_literals(['_a'])
ast.normalize()
# assert normalize() doesn't alter a normalized ast
assert_text_literals(['_a'])
def test_dumpAST_orderedlist(self):
md = '1.'
ast = Parser().parse(md)
commonmark.dumpAST(ast)
@given(text())
def test_random_text(self, s):
commonmark.commonmark(s)
def test_smart_dashes(self):
md = 'a - b -- c --- d ---- e ----- f'
EM = '\u2014'
EN = '\u2013'
expected_html = (
'<p>'
+ 'a - '
+ 'b ' + EN + ' '
+ 'c ' + EM + ' '
+ 'd ' + EN + EN + ' '
+ 'e ' + EM + EN + ' '
+ 'f</p>\n')
parser = commonmark.Parser(options=dict(smart=True))
ast = parser.parse(md)
renderer = commonmark.HtmlRenderer()
html = renderer.render(ast)
self.assertEqual(html, expected_html)
def test_regex_vulnerability_link_label(self):
i = 200
while i <= 2000:
s = commonmark.commonmark('[' + ('\\' * i) + '\n')
self.assertEqual(s, '<p>' + '[' + ('\\' * (i // 2)) + '</p>\n',
'[\\\\... %d deep' % (i,))
i *= 10
def test_regex_vulnerability_link_destination(self):
i = 200
while i <= 2000:
s = commonmark.commonmark(('[](' * i) + '\n')
self.assertEqual(s, '<p>' + ('[](' * i) + '</p>\n',
'[]( %d deep' % (i,))
i *= 10
class TestHtmlRenderer(unittest.TestCase):
def test_init(self):
HtmlRenderer()
class TestInlineParser(unittest.TestCase):
def test_init(self):
InlineParser()
class TestNode(unittest.TestCase):
def test_doc_node(self):
Node('document', [[1, 1], [0, 0]])
class TestNodeWalker(unittest.TestCase):
def test_node_walker(self):
node = Node('document', [[1, 1], [0, 0]])
NodeWalker(node)
def test_node_walker_iter(self):
node = Node('document', [[1, 1], [0, 0]])
for subnode, entered in node.walker():
pass
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = Parser()
@given(text())
@example('')
@example('* unicode: \u2020')
def test_text(self, s):
self.parser.parse(s)
if __name__ == '__main__':
unittest.main()


@ -23,9 +23,9 @@ def get_keywords():
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (HEAD -> master)"
git_full = "ce46d91fa2d325a13c2830f8030a316ed49b6cc9"
git_date = "2020-09-05 11:15:34 -0700"
git_refnames = " (tag: 0.4.11)"
git_full = "fe416b437c28cd6cf383248b90005a2d516549f2"
git_date = "2021-01-29 22:33:25 -0800"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords


@ -3,6 +3,9 @@ import logging
import math
import numpy as np
from .constants import FRAMERATE_RATIOS
from .golden_section_search import gss
from .sklearn_shim import TransformerMixin
logging.basicConfig(level=logging.INFO)
@ -14,11 +17,25 @@ class FailedToFindAlignmentException(Exception):
class FFTAligner(TransformerMixin):
def __init__(self):
def __init__(self, max_offset_samples=None):
self.max_offset_samples = max_offset_samples
self.best_offset_ = None
self.best_score_ = None
self.get_score_ = False
def _zero_out_extreme_offsets(self, convolve, substring):
convolve = np.copy(convolve)
if self.max_offset_samples is None:
return convolve
offset_to_index = lambda offset: len(convolve) - 1 + offset - len(substring)
convolve[:offset_to_index(-self.max_offset_samples)] = convolve[offset_to_index(self.max_offset_samples):] = 0
return convolve
def _compute_argmax(self, convolve, substring):
best_idx = np.argmax(convolve)
self.best_offset_ = len(convolve) - 1 - best_idx - len(substring)
self.best_score_ = convolve[best_idx]
def fit(self, refstring, substring, get_score=False):
refstring, substring = [
list(map(int, s))
@ -33,9 +50,9 @@ class FFTAligner(TransformerMixin):
subft = np.fft.fft(np.append(np.zeros(extra_zeros + len(refstring)), substring))
refft = np.fft.fft(np.flip(np.append(refstring, np.zeros(len(substring) + extra_zeros)), 0))
convolve = np.real(np.fft.ifft(subft * refft))
best_idx = np.argmax(convolve)
self.best_offset_ = len(convolve) - 1 - best_idx - len(substring)
self.best_score_ = convolve[best_idx]
self._compute_argmax(self._zero_out_extreme_offsets(convolve, substring), substring)
if self.best_score_ == 0.:
self._compute_argmax(convolve, substring)
self.get_score_ = get_score
return self
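The core trick above is cross-correlation via FFT: zero-pad both binary speech strings, multiply the substring's spectrum by the spectrum of the reversed reference, and read the best shift off the argmax. A self-contained sketch of just that arithmetic (without the power-of-two padding and int conversion the real fit() performs; values hypothetical):
import numpy as np

def best_offset(ref, sub):
    # Zero-pad both sides so the circular FFT convolution realizes a
    # linear cross-correlation, then read the best shift off the argmax.
    subft = np.fft.fft(np.append(np.zeros(len(ref)), sub))
    refft = np.fft.fft(np.flip(np.append(ref, np.zeros(len(sub))), 0))
    convolve = np.real(np.fft.ifft(subft * refft))
    best_idx = int(np.argmax(convolve))
    return len(convolve) - 1 - best_idx - len(sub)

ref = np.zeros(100)
ref[30:60] = 1.   # "speech" at frames 30-59 in the reference
sub = np.zeros(100)
sub[20:50] = 1.   # the same speech, 10 frames too early in the subs
print(best_offset(ref, sub))   # -> 10: shift the subtitles 10 frames right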
@ -47,24 +64,40 @@ class FFTAligner(TransformerMixin):
class MaxScoreAligner(TransformerMixin):
def __init__(self, base_aligner, sample_rate=None, max_offset_seconds=None):
if isinstance(base_aligner, type):
self.base_aligner = base_aligner()
else:
self.base_aligner = base_aligner
self.max_offset_seconds = max_offset_seconds
def __init__(self, base_aligner, srtin=None, sample_rate=None, max_offset_seconds=None):
self.srtin = srtin
if sample_rate is None or max_offset_seconds is None:
self.max_offset_samples = None
else:
self.max_offset_samples = abs(max_offset_seconds * sample_rate)
self.max_offset_samples = abs(int(max_offset_seconds * sample_rate))
if isinstance(base_aligner, type):
self.base_aligner = base_aligner(max_offset_samples=self.max_offset_samples)
else:
self.base_aligner = base_aligner
self.max_offset_seconds = max_offset_seconds
self._scores = []
def fit_gss(self, refstring, subpipe_maker):
def opt_func(framerate_ratio, is_last_iter):
subpipe = subpipe_maker(framerate_ratio)
substring = subpipe.fit_transform(self.srtin)
score = self.base_aligner.fit_transform(refstring, substring, get_score=True)
logger.info('got score %.0f (offset %d) for ratio %.3f', score[0], score[1], framerate_ratio)
if is_last_iter:
self._scores.append((score, subpipe))
return -score[0]
gss(opt_func, 0.9, 1.1)
return self
def fit(self, refstring, subpipes):
if not isinstance(subpipes, list):
subpipes = [subpipes]
for subpipe in subpipes:
if hasattr(subpipe, 'transform'):
substring = subpipe.transform(None)
if callable(subpipe):
self.fit_gss(refstring, subpipe)
continue
elif hasattr(subpipe, 'transform'):
substring = subpipe.transform(self.srtin)
else:
substring = subpipe
self._scores.append((
@ -84,4 +117,4 @@ class MaxScoreAligner(TransformerMixin):
'--max-offset-seconds with a number larger than '
'{}'.format(self.max_offset_seconds))
(score, offset), subpipe = max(scores, key=lambda x: x[0][0])
return offset, subpipe
return (score, offset), subpipe


@ -6,12 +6,14 @@ SAMPLE_RATE = 100
FRAMERATE_RATIOS = [24./23.976, 25./23.976, 25./24.]
DEFAULT_FRAME_RATE = 48000
DEFAULT_NON_SPEECH_LABEL = 0.
DEFAULT_ENCODING = 'infer'
DEFAULT_MAX_SUBTITLE_SECONDS = 10
DEFAULT_START_SECONDS = 0
DEFAULT_SCALE_FACTOR = 1
DEFAULT_VAD = 'subs_then_webrtc'
DEFAULT_MAX_OFFSET_SECONDS = 600
DEFAULT_MAX_OFFSET_SECONDS = 60
DEFAULT_APPLY_OFFSET_SECONDS = 0
SUBTITLE_EXTENSIONS = ('srt', 'ass', 'ssa', 'sub')

libs/ffsubsync/ffsubsync.py Normal file → Executable file

@ -50,7 +50,7 @@ def make_test_case(args, npy_savename, sync_was_successful):
if args.log_dir_path and os.path.isdir(args.log_dir_path):
log_path = os.path.join(args.log_dir_path, log_path)
shutil.copy(log_path, tar_dir)
shutil.copy(args.srtin, tar_dir)
shutil.copy(args.srtin[0], tar_dir)
if sync_was_successful:
shutil.move(args.srtout, tar_dir)
if _ref_format(args.reference) in SUBTITLE_EXTENSIONS:
@ -75,44 +75,96 @@ def make_test_case(args, npy_savename, sync_was_successful):
return 0
def try_sync(args, reference_pipe, srt_pipes, result):
def get_srt_pipe_maker(args, srtin):
if srtin is None:
srtin_format = 'srt'
else:
srtin_format = os.path.splitext(srtin)[-1][1:]
parser = make_subtitle_parser(fmt=srtin_format, caching=True, **args.__dict__)
return lambda scale_factor: make_subtitle_speech_pipeline(
**override(args, scale_factor=scale_factor, parser=parser)
)
def get_framerate_ratios_to_try(args):
if args.no_fix_framerate:
return []
else:
framerate_ratios = list(np.concatenate([
np.array(FRAMERATE_RATIOS), 1./np.array(FRAMERATE_RATIOS)
]))
if args.gss:
framerate_ratios.append(None)
return framerate_ratios
def try_sync(args, reference_pipe, result):
sync_was_successful = True
exc = None
try:
logger.info('extracting speech segments from subtitles file %s...', args.srtin)
for srt_pipe in srt_pipes:
srt_pipe.fit(args.srtin)
logger.info('...done')
logger.info('computing alignments...')
offset_samples, best_srt_pipe = MaxScoreAligner(
FFTAligner, SAMPLE_RATE, args.max_offset_seconds
).fit_transform(
reference_pipe.transform(args.reference),
srt_pipes,
)
logger.info('...done')
offset_seconds = offset_samples / float(SAMPLE_RATE)
scale_step = best_srt_pipe.named_steps['scale']
logger.info('offset seconds: %.3f', offset_seconds)
logger.info('framerate scale factor: %.3f', scale_step.scale_factor)
output_steps = [('shift', SubtitleShifter(offset_seconds))]
if args.merge_with_reference:
output_steps.append(
('merge',
SubtitleMerger(reference_pipe.named_steps['parse'].subs_))
)
output_pipe = Pipeline(output_steps)
out_subs = output_pipe.fit_transform(scale_step.subs_)
if args.output_encoding != 'same':
out_subs = out_subs.set_encoding(args.output_encoding)
logger.info('writing output to {}'.format(args.srtout or 'stdout'))
out_subs.write_file(args.srtout)
logger.info('extracting speech segments from %s...',
'stdin' if not args.srtin else 'subtitles file(s) {}'.format(args.srtin))
if not args.srtin:
args.srtin = [None]
for srtin in args.srtin:
srtout = srtin if args.overwrite_input else args.srtout
srt_pipe_maker = get_srt_pipe_maker(args, srtin)
framerate_ratios = get_framerate_ratios_to_try(args)
srt_pipes = [srt_pipe_maker(1.)] + [srt_pipe_maker(rat) for rat in framerate_ratios]
for srt_pipe in srt_pipes:
if callable(srt_pipe):
continue
else:
srt_pipe.fit(srtin)
if not args.skip_infer_framerate_ratio and hasattr(reference_pipe[-1], 'num_frames'):
inferred_framerate_ratio_from_length = float(reference_pipe[-1].num_frames) / srt_pipes[0][-1].num_frames
logger.info('inferred framerate ratio: %.3f' % inferred_framerate_ratio_from_length)
srt_pipes.append(srt_pipe_maker(inferred_framerate_ratio_from_length).fit(srtin))
logger.info('...done')
logger.info('computing alignments...')
if args.skip_sync:
best_score = 0.
best_srt_pipe = srt_pipes[0]
if callable(best_srt_pipe):
best_srt_pipe = best_srt_pipe(1.0).fit(srtin)
offset_samples = 0
else:
(best_score, offset_samples), best_srt_pipe = MaxScoreAligner(
FFTAligner, srtin, SAMPLE_RATE, args.max_offset_seconds
).fit_transform(
reference_pipe.transform(args.reference),
srt_pipes,
)
logger.info('...done')
offset_seconds = offset_samples / float(SAMPLE_RATE) + args.apply_offset_seconds
scale_step = best_srt_pipe.named_steps['scale']
logger.info('score: %.3f', best_score)
logger.info('offset seconds: %.3f', offset_seconds)
logger.info('framerate scale factor: %.3f', scale_step.scale_factor)
output_steps = [('shift', SubtitleShifter(offset_seconds))]
if args.merge_with_reference:
output_steps.append(
('merge', SubtitleMerger(reference_pipe.named_steps['parse'].subs_))
)
output_pipe = Pipeline(output_steps)
out_subs = output_pipe.fit_transform(scale_step.subs_)
if args.output_encoding != 'same':
out_subs = out_subs.set_encoding(args.output_encoding)
logger.info('writing output to {}'.format(srtout or 'stdout'))
out_subs.write_file(srtout)
except FailedToFindAlignmentException as e:
sync_was_successful = False
logger.error(e)
except Exception as e:
exc = e
sync_was_successful = False
logger.error(e)
else:
result['offset_seconds'] = offset_seconds
result['framerate_scale_factor'] = scale_step.scale_factor
finally:
if exc is not None:
raise exc
result['sync_was_successful'] = sync_was_successful
return sync_was_successful
@ -133,7 +185,7 @@ def make_reference_pipe(args):
if args.vad is not None:
logger.warning('Vad specified, but reference was not a movie')
return Pipeline([
('deserialize', DeserializeSpeechTransformer())
('deserialize', DeserializeSpeechTransformer(args.non_speech_label))
])
else:
vad = args.vad or DEFAULT_VAD
@ -143,34 +195,20 @@ def make_reference_pipe(args):
if ref_stream is not None and not ref_stream.startswith('0:'):
ref_stream = '0:' + ref_stream
return Pipeline([
('speech_extract', VideoSpeechTransformer(vad=vad,
sample_rate=SAMPLE_RATE,
frame_rate=args.frame_rate,
start_seconds=args.start_seconds,
ffmpeg_path=args.ffmpeg_path,
ref_stream=ref_stream,
vlc_mode=args.vlc_mode,
gui_mode=args.gui_mode))
('speech_extract', VideoSpeechTransformer(
vad=vad,
sample_rate=SAMPLE_RATE,
frame_rate=args.frame_rate,
non_speech_label=args.non_speech_label,
start_seconds=args.start_seconds,
ffmpeg_path=args.ffmpeg_path,
ref_stream=ref_stream,
vlc_mode=args.vlc_mode,
gui_mode=args.gui_mode
)),
])
def make_srt_pipes(args):
if args.no_fix_framerate:
framerate_ratios = [1.]
else:
framerate_ratios = np.concatenate([
[1.], np.array(FRAMERATE_RATIOS), 1./np.array(FRAMERATE_RATIOS)
])
parser = make_subtitle_parser(fmt=os.path.splitext(args.srtin)[-1][1:], caching=True, **args.__dict__)
srt_pipes = [
make_subtitle_speech_pipeline(
**override(args, scale_factor=scale_factor, parser=parser)
)
for scale_factor in framerate_ratios
]
return srt_pipes
def extract_subtitles_from_reference(args):
stream = args.extract_subs_from_stream
if not stream.startswith('0:s:'):
@ -204,13 +242,19 @@ def extract_subtitles_from_reference(args):
def validate_args(args):
if args.vlc_mode:
logger.setLevel(logging.CRITICAL)
if len(args.srtin) > 1 and not args.overwrite_input:
raise ValueError('cannot specify multiple input srt files without overwriting')
if len(args.srtin) > 1 and args.make_test_case:
raise ValueError('cannot specify multiple input srt files for test cases')
if len(args.srtin) > 1 and args.gui_mode:
raise ValueError('cannot specify multiple input srt files in GUI mode')
if args.make_test_case and not args.gui_mode: # this validation not necessary for gui mode
if args.srtin is None or args.srtout is None:
raise ValueError('need to specify input and output srt files for test cases')
if args.overwrite_input:
if args.extract_subs_from_stream is not None:
raise ValueError('input overwriting not allowed for extracting subtitles from reference')
if args.srtin is None:
if not args.srtin:
raise ValueError(
'need to specify input srt if --overwrite-input is specified since we cannot overwrite stdin'
)
@ -221,17 +265,19 @@ def validate_args(args):
if args.extract_subs_from_stream is not None:
if args.make_test_case:
raise ValueError('test case is for sync and not subtitle extraction')
if args.srtin is not None:
if args.srtin:
raise ValueError('stream specified for reference subtitle extraction; -i flag for sync input not allowed')
def validate_file_permissions(args):
error_string_template = 'unable to {action} {file}; try ensuring file exists and has correct permissions'
if not os.access(args.reference, os.R_OK):
raise ValueError('unable to read reference %s (try checking permissions)' % args.reference)
if not os.access(args.srtin, os.R_OK):
raise ValueError('unable to read input subtitles %s (try checking permissions)' % args.srtin)
if os.path.exists(args.srtout) and not os.access(args.srtout, os.W_OK):
raise ValueError('unable to write output subtitles %s (try checking permissions)' % args.srtout)
raise ValueError(error_string_template.format(action='read reference', file=args.reference))
for srtin in args.srtin:
if srtin is not None and not os.access(srtin, os.R_OK):
raise ValueError(error_string_template.format(action='read input subtitles', file=srtin))
if args.srtout is not None and os.path.exists(args.srtout) and not os.access(args.srtout, os.W_OK):
raise ValueError(error_string_template.format(action='write output subtitles', file=args.srtout))
if args.make_test_case or args.serialize_speech:
npy_savename = os.path.splitext(args.reference)[0] + '.npz'
if os.path.exists(npy_savename) and not os.access(npy_savename, os.W_OK):
@ -251,10 +297,8 @@ def run(args):
logger.error(e)
result['retval'] = 1
return result
if args.overwrite_input:
args.srtout = args.srtin
if args.gui_mode and args.srtout is None:
args.srtout = '{}.synced.srt'.format(os.path.splitext(args.srtin)[0])
args.srtout = '{}.synced.srt'.format(os.path.splitext(args.srtin[0])[0])
try:
validate_file_permissions(args)
except ValueError as e:
@ -288,11 +332,10 @@ def run(args):
npy_savename = os.path.splitext(args.reference)[0] + '.npz'
np.savez_compressed(npy_savename, speech=reference_pipe.transform(args.reference))
logger.info('...done')
if args.srtin is None:
if args.srtin[0] is None:
logger.info('unsynchronized subtitle file not specified; skipping synchronization')
return result
srt_pipes = make_srt_pipes(args)
sync_was_successful = try_sync(args, reference_pipe, srt_pipes, result)
sync_was_successful = try_sync(args, reference_pipe, result)
if log_handler is not None and log_path is not None:
assert args.make_test_case
log_handler.close()
@ -309,7 +352,7 @@ def add_main_args_for_cli(parser):
'reference',
help='Reference (video, subtitles, or a numpy array with VAD speech) to which to synchronize input subtitles.'
)
parser.add_argument('-i', '--srtin', help='Input subtitles file (default=stdin).')
parser.add_argument('-i', '--srtin', nargs='*', help='Input subtitles file (default=stdin).')
parser.add_argument('-o', '--srtout', help='Output subtitles file (default=stdout).')
parser.add_argument('--merge-with-reference', '--merge', action='store_true',
help='Merge reference subtitles with synced output subtitles.')
@ -321,14 +364,16 @@ def add_main_args_for_cli(parser):
'--reference-stream', '--refstream', '--reference-track', '--reftrack',
default=None,
help='Which stream/track in the video file to use as reference, '
'formatted according to ffmpeg conventions. For example, s:0 '
'uses the first subtitle track; a:3 would use the third audio track.'
'formatted according to ffmpeg conventions. For example, 0:s:0 '
'uses the first subtitle track; 0:a:3 would use the third audio track. '
'You can also drop the leading `0:`; i.e. use s:0 or a:3, respectively. '
'Example: `ffs ref.mkv -i in.srt -o out.srt --reference-stream s:2`'
)
def add_cli_only_args(parser):
# parser.add_argument('-v', '--version', action='version',
# version='{package} {version}'.format(package=__package__, version=get_version()))
parser.add_argument('-v', '--version', action='version',
version='{package} {version}'.format(package=__package__, version=get_version()))
parser.add_argument('--overwrite-input', action='store_true',
help='If specified, will overwrite the input srt instead of writing the output to a new file.')
parser.add_argument('--encoding', default=DEFAULT_ENCODING,
@ -340,11 +385,18 @@ def add_cli_only_args(parser):
parser.add_argument('--start-seconds', type=int, default=DEFAULT_START_SECONDS,
help='Start time for processing '
'(default=%d seconds).' % DEFAULT_START_SECONDS)
parser.add_argument('--max-offset-seconds', type=int, default=DEFAULT_MAX_OFFSET_SECONDS,
parser.add_argument('--max-offset-seconds', type=float, default=DEFAULT_MAX_OFFSET_SECONDS,
help='The max allowed offset seconds for any subtitle segment '
'(default=%d seconds).' % DEFAULT_MAX_OFFSET_SECONDS)
parser.add_argument('--apply-offset-seconds', type=float, default=DEFAULT_APPLY_OFFSET_SECONDS,
help='Apply a predefined offset in seconds to all subtitle segments '
'(default=%d seconds).' % DEFAULT_APPLY_OFFSET_SECONDS)
parser.add_argument('--frame-rate', type=int, default=DEFAULT_FRAME_RATE,
help='Frame rate for audio extraction (default=%d).' % DEFAULT_FRAME_RATE)
parser.add_argument('--skip-infer-framerate-ratio', action='store_true',
help='If set, do not try to infer framerate ratio based on duration ratio.')
parser.add_argument('--non-speech-label', type=float, default=DEFAULT_NON_SPEECH_LABEL,
help='Label to use for frames detected as non-speech (default=%f)' % DEFAULT_NON_SPEECH_LABEL)
parser.add_argument('--output-encoding', default='utf-8',
help='What encoding to use for writing output subtitles '
'(default=utf-8). Can indicate "same" to use same '
@ -372,6 +424,8 @@ def add_cli_only_args(parser):
'directory).')
parser.add_argument('--vlc-mode', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gui-mode', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--skip-sync', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gss', action='store_true', help=argparse.SUPPRESS)
def make_parser():

libs/ffsubsync/ffsubsync_gui.py Normal file → Executable file

@ -13,14 +13,11 @@ class open_file(object):
if filename is None:
stream = sys.stdout if 'w' in args else sys.stdin
if six.PY3:
self.closeable = open(stream.fileno(), *args, **kwargs)
self.fh = self.closeable.buffer
self.fh = open(stream.fileno(), *args, **kwargs)
else:
self.closeable = stream
self.fh = self.closeable
self.fh = stream
elif isinstance(filename, six.string_types):
self.fh = open(filename, *args, **kwargs)
self.closeable = self.fh
self.closing = True
else:
self.fh = filename
@ -30,6 +27,6 @@ class open_file(object):
def __exit__(self, exc_type, exc_val, exc_tb):
if self.closing:
self.closeable.close()
self.fh.close()
return False
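A sketch of the intended call sites (paths hypothetical; assumes __enter__ yields the underlying handle): a real filename is opened and closed normally, while None falls back to the process's stdin or stdout depending on the mode.
with open_file('subs.srt', 'rb') as f:   # regular file: closed on exit
    data = f.read()

with open_file(None, 'w') as f:          # no filename: wraps sys.stdout
    f.write('synced output')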


@ -35,6 +35,16 @@ class GenericSubtitle(object):
eq = eq and self.inner == other.inner
return eq
@property
def content(self):
if isinstance(self.inner, srt.Subtitle):
ret = self.inner.content
elif isinstance(self.inner, pysubs2.SSAEvent):
ret = self.inner.text
else:
raise NotImplementedError('unsupported subtitle type: %s' % type(self.inner))
return ret
def resolve_inner_timestamps(self):
ret = copy.deepcopy(self.inner)
if isinstance(self.inner, srt.Subtitle):
@ -85,6 +95,7 @@ class GenericSubtitlesFile(object):
self.subs_ = subs
self._sub_format = sub_format
self._encoding = encoding
self._styles = kwargs.pop('styles', None)
def set_encoding(self, encoding):
if encoding != 'same':
@ -105,6 +116,10 @@ class GenericSubtitlesFile(object):
def encoding(self):
return self._encoding
@property
def styles(self):
return self._styles
def gen_raw_resolved_subs(self):
for sub in self.subs_:
yield sub.resolve_inner_timestamps()
@ -118,7 +133,8 @@ class GenericSubtitlesFile(object):
return GenericSubtitlesFile(
offset_subs,
sub_format=self.sub_format,
encoding=self.encoding
encoding=self.encoding,
styles=self.styles
)
def write_file(self, fname):
@ -133,6 +149,7 @@ class GenericSubtitlesFile(object):
elif out_format in ('ssa', 'ass'):
ssaf = pysubs2.SSAFile()
ssaf.events = subs
ssaf.styles = self.styles
to_write = ssaf.to_string(out_format)
else:
raise NotImplementedError('unsupported output format: %s' % out_format)


@ -0,0 +1,70 @@
"""Python program for golden section search (straight-up copied from Wikipedia).
This implementation reuses function evaluations, saving 1/2 of the evaluations per
iteration, and returns a bounding interval."""
import logging
import math
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
invphi = (math.sqrt(5) - 1) / 2 # 1 / phi
invphi2 = (3 - math.sqrt(5)) / 2 # 1 / phi^2
def gss(f, a, b, tol=1e-4):
"""Golden-section search.
Given a function f with a single local minimum in
the interval [a,b], gss returns a subset interval
[c,d] that contains the minimum with d-c <= tol.
Example:
>>> f = lambda x: (x-2)**2
>>> a = 1
>>> b = 5
>>> tol = 1e-5
>>> (c,d) = gss(f, a, b, tol)
>>> print(c, d)
1.9999959837979107 2.0000050911830893
"""
(a, b) = (min(a, b), max(a, b))
h = b - a
if h <= tol:
return a, b
# Required steps to achieve tolerance
n = int(math.ceil(math.log(tol / h) / math.log(invphi)))
logger.info('About to perform %d iterations of golden section search to find the best framerate', n)
def f_wrapped(x, is_last_iter):
try:
return f(x, is_last_iter)
except TypeError:
return f(x)
c = a + invphi2 * h
d = a + invphi * h
yc = f_wrapped(c, n==1)
yd = f_wrapped(d, n==1)
for k in range(n-1):
if yc < yd:
b = d
d = c
yd = yc
h = invphi * h
c = a + invphi2 * h
yc = f_wrapped(c, k==n-2)
else:
a = c
c = d
yc = yd
h = invphi * h
d = a + invphi * h
yd = f_wrapped(d, k==n-2)
if yc < yd:
return a, d
else:
return c, b


@ -42,18 +42,24 @@ def make_subtitle_speech_pipeline(
assert parser.encoding == encoding
assert parser.max_subtitle_seconds == max_subtitle_seconds
assert parser.start_seconds == start_seconds
return Pipeline([
('parse', parser),
('scale', SubtitleScaler(scale_factor)),
('speech_extract', SubtitleSpeechTransformer(
sample_rate=SAMPLE_RATE,
start_seconds=start_seconds,
framerate_ratio=scale_factor,
))
])
def subpipe_maker(framerate_ratio):
return Pipeline([
('parse', parser),
('scale', SubtitleScaler(framerate_ratio)),
('speech_extract', SubtitleSpeechTransformer(
sample_rate=SAMPLE_RATE,
start_seconds=start_seconds,
framerate_ratio=framerate_ratio,
))
])
if scale_factor is None:
return subpipe_maker
else:
return subpipe_maker(scale_factor)
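With this change, scale_factor=None turns the function into a factory: instead of a fitted Pipeline the caller receives subpipe_maker itself, which the golden-section search can call once per candidate framerate ratio. A sketch (file name hypothetical):
pipe_maker = make_subtitle_speech_pipeline(scale_factor=None)
pipe = pipe_maker(25. / 23.976)   # build one pipeline per candidate ratio
pipe.fit('movie.srt')             # parse + scale + extract speech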
def _make_auditok_detector(sample_rate, frame_rate):
def _make_auditok_detector(sample_rate, frame_rate, non_speech_label):
try:
from auditok import \
BufferAudioSource, ADSFactory, AudioEnergyValidator, StreamTokenizer
@ -70,31 +76,37 @@ def _make_auditok_detector(sample_rate, frame_rate):
bytes_per_frame = 2
frames_per_window = frame_rate // sample_rate
validator = AudioEnergyValidator(
sample_width=bytes_per_frame, energy_threshold=50)
sample_width=bytes_per_frame, energy_threshold=50
)
tokenizer = StreamTokenizer(
validator=validator, min_length=0.2*sample_rate,
max_length=int(5*sample_rate),
max_continuous_silence=0.25*sample_rate)
validator=validator,
min_length=0.2 * sample_rate,
max_length=int(5 * sample_rate),
max_continuous_silence=0.25 * sample_rate
)
def _detect(asegment):
asource = BufferAudioSource(data_buffer=asegment,
sampling_rate=frame_rate,
sample_width=bytes_per_frame,
channels=1)
asource = BufferAudioSource(
data_buffer=asegment,
sampling_rate=frame_rate,
sample_width=bytes_per_frame,
channels=1
)
ads = ADSFactory.ads(audio_source=asource, block_dur=1./sample_rate)
ads.open()
tokens = tokenizer.tokenize(ads)
length = (len(asegment)//bytes_per_frame
+ frames_per_window - 1)//frames_per_window
media_bstring = np.zeros(length+1, dtype=int)
length = (
len(asegment)//bytes_per_frame + frames_per_window - 1
) // frames_per_window
media_bstring = np.zeros(length + 1)
for token in tokens:
media_bstring[token[1]] += 1
media_bstring[token[2]+1] -= 1
return (np.cumsum(media_bstring)[:-1] > 0).astype(float)
media_bstring[token[1]] = 1.
media_bstring[token[2] + 1] = non_speech_label - 1.
return np.clip(np.cumsum(media_bstring)[:-1], 0., 1.)
return _detect
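The new labeling replaces the old +1/-1 interval counting: a token's first frame is set to 1., the frame just past its last to non_speech_label - 1., so the cumulative sum reads 1. inside tokens and non_speech_label after them (clipped to [0, 1]). A worked example with hypothetical values:
import numpy as np

non_speech_label = 0.5
media = np.zeros(11)
media[2] = 1.                      # token covering frames 2..5
media[6] = non_speech_label - 1.   # first frame past the token
print(np.clip(np.cumsum(media)[:-1], 0., 1.))
# -> [0.  0.  1.  1.  1.  1.  0.5 0.5 0.5 0.5]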
def _make_webrtcvad_detector(sample_rate, frame_rate):
def _make_webrtcvad_detector(sample_rate, frame_rate, non_speech_label):
import webrtcvad
vad = webrtcvad.Vad()
vad.set_mode(3) # set non-speech pruning aggressiveness from 0 to 3
@ -117,17 +129,41 @@ def _make_webrtcvad_detector(sample_rate, frame_rate):
is_speech = False
failures += 1
# webrtcvad has low recall on mode 3, so treat non-speech as "not sure"
media_bstring.append(1. if is_speech else 0.5)
media_bstring.append(1. if is_speech else non_speech_label)
return np.array(media_bstring)
return _detect
class ComputeSpeechFrameBoundariesMixin(object):
def __init__(self):
self.start_frame_ = None
self.end_frame_ = None
@property
def num_frames(self):
if self.start_frame_ is None or self.end_frame_ is None:
return None
return self.end_frame_ - self.start_frame_
def fit_boundaries(self, speech_frames):
nz = np.nonzero(speech_frames > 0.5)[0]
if len(nz) > 0:
self.start_frame_ = np.min(nz)
self.end_frame_ = np.max(nz)
return self
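A quick check of the boundary logic (frames above 0.5 count as speech; values hypothetical):
import numpy as np

b = ComputeSpeechFrameBoundariesMixin()
b.fit_boundaries(np.array([0., 0., 1., 1., 0.7, 0., 0.]))
print(b.start_frame_, b.end_frame_, b.num_frames)   # -> 2 4 2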
class VideoSpeechTransformer(TransformerMixin):
def __init__(self, vad, sample_rate, frame_rate, start_seconds=0, ffmpeg_path=None, ref_stream=None, vlc_mode=False, gui_mode=False):
def __init__(
self, vad, sample_rate, frame_rate, non_speech_label, start_seconds=0,
ffmpeg_path=None, ref_stream=None, vlc_mode=False, gui_mode=False
):
super(VideoSpeechTransformer, self).__init__()
self.vad = vad
self.sample_rate = sample_rate
self.frame_rate = frame_rate
self._non_speech_label = non_speech_label
self.start_seconds = start_seconds
self.ffmpeg_path = ffmpeg_path
self.ref_stream = ref_stream
@ -159,12 +195,17 @@ class VideoSpeechTransformer(TransformerMixin):
break
pipe = make_subtitle_speech_pipeline(start_seconds=self.start_seconds).fit(output)
speech_step = pipe.steps[-1][1]
embedded_subs.append(speech_step.subtitle_speech_results_)
embedded_subs.append(speech_step)
embedded_subs_times.append(speech_step.max_time_)
if len(embedded_subs) == 0:
raise ValueError('Video file appears to lack subtitle stream')
if self.ref_stream is None:
error_msg = 'Video file appears to lack subtitle stream'
else:
error_msg = 'Stream {} not found'.format(self.ref_stream)
raise ValueError(error_msg)
# use longest set of embedded subs
self.video_speech_results_ = embedded_subs[int(np.argmax(embedded_subs_times))]
subs_to_use = embedded_subs[int(np.argmax(embedded_subs_times))]
self.video_speech_results_ = subs_to_use.subtitle_speech_results_
def fit(self, fname, *_):
if 'subs' in self.vad and (self.ref_stream is None or self.ref_stream.startswith('0:s:')):
@ -183,9 +224,9 @@ class VideoSpeechTransformer(TransformerMixin):
logger.warning(e)
total_duration = None
if 'webrtc' in self.vad:
detector = _make_webrtcvad_detector(self.sample_rate, self.frame_rate)
detector = _make_webrtcvad_detector(self.sample_rate, self.frame_rate, self._non_speech_label)
elif 'auditok' in self.vad:
detector = _make_auditok_detector(self.sample_rate, self.frame_rate)
detector = _make_auditok_detector(self.sample_rate, self.frame_rate, self._non_speech_label)
else:
raise ValueError('unknown vad: %s' % self.vad)
media_bstring = []
@ -257,8 +298,33 @@ class VideoSpeechTransformer(TransformerMixin):
return self.video_speech_results_
class SubtitleSpeechTransformer(TransformerMixin):
_PAIRED_NESTER = {
'(': ')',
'{': '}',
'[': ']',
# FIXME: False positive sometimes when there are html tags, e.g. <i> Hello? </i>
# '<': '>',
}
# TODO: need way better metadata detector
def _is_metadata(content, is_beginning_or_end):
content = content.strip()
if len(content) == 0:
return True
if content[0] in _PAIRED_NESTER.keys() and content[-1] == _PAIRED_NESTER[content[0]]:
return True
if is_beginning_or_end:
if 'english' in content.lower():
return True
if ' - ' in content:
return True
return False
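Illustrative calls under these heuristics (all contents hypothetical):
_is_metadata('(clears throat)', False)                # True: fully bracketed
_is_metadata('Subs by example.org - english', True)   # True: boundary credits
_is_metadata('Hello there!', True)                    # False: real dialogue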
class SubtitleSpeechTransformer(TransformerMixin, ComputeSpeechFrameBoundariesMixin):
def __init__(self, sample_rate, start_seconds=0, framerate_ratio=1.):
super(SubtitleSpeechTransformer, self).__init__()
self.sample_rate = sample_rate
self.start_seconds = start_seconds
self.framerate_ratio = framerate_ratio
@ -271,12 +337,19 @@ class SubtitleSpeechTransformer(TransformerMixin):
max_time = max(max_time, sub.end.total_seconds())
self.max_time_ = max_time - self.start_seconds
samples = np.zeros(int(max_time * self.sample_rate) + 2, dtype=float)
for sub in subs:
start_frame = float('inf')
end_frame = 0
for i, sub in enumerate(subs):
if _is_metadata(sub.content, i == 0 or i + 1 == len(subs)):
continue
start = int(round((sub.start.total_seconds() - self.start_seconds) * self.sample_rate))
start_frame = min(start_frame, start)
duration = sub.end.total_seconds() - sub.start.total_seconds()
end = start + int(round(duration * self.sample_rate))
end_frame = max(end_frame, end)
samples[start:end] = min(1. / self.framerate_ratio, 1.)
self.subtitle_speech_results_ = samples
self.fit_boundaries(self.subtitle_speech_results_)
return self
def transform(self, *_):
@ -284,7 +357,9 @@ class SubtitleSpeechTransformer(TransformerMixin):
class DeserializeSpeechTransformer(TransformerMixin):
def __init__(self):
def __init__(self, non_speech_label):
super(DeserializeSpeechTransformer, self).__init__()
self._non_speech_label = non_speech_label
self.deserialized_speech_results_ = None
def fit(self, fname, *_):
@ -295,6 +370,7 @@ class DeserializeSpeechTransformer(TransformerMixin):
else:
raise ValueError('could not find "speech" array in '
'serialized file; only contains: %s' % speech.files)
speech[speech < 1.] = self._non_speech_label
self.deserialized_speech_results_ = speech
return self

libs/ffsubsync/subtitle_parser.py Normal file → Executable file

@ -76,7 +76,7 @@ class GenericSubtitleParser(SubsMixin, TransformerMixin):
self.start_seconds = start_seconds
def fit(self, fname, *_):
if self.caching and self.fit_fname == fname:
if self.caching and self.fit_fname == ('<stdin>' if fname is None else fname):
return self
encodings_to_try = (self.encoding,)
with open_file(fname, 'rb') as f:
@ -100,9 +100,10 @@ class GenericSubtitleParser(SubsMixin, TransformerMixin):
max_subtitle_seconds=self.max_subtitle_seconds,
start_seconds=self.start_seconds),
sub_format=self.sub_format,
encoding=encoding
encoding=encoding,
styles=parsed_subs.styles if isinstance(parsed_subs, pysubs2.SSAFile) else None
)
self.fit_fname = fname
self.fit_fname = '<stdin>' if fname is None else fname
if len(encodings_to_try) > 1:
self.detected_encoding_ = encoding
logger.info('detected encoding: %s' % self.detected_encoding_)


@ -44,7 +44,12 @@ class SubtitleScaler(SubsMixin, TransformerMixin):
sub.inner
)
)
self.subs_ = GenericSubtitlesFile(scaled_subs, sub_format=subs.sub_format, encoding=subs.encoding)
self.subs_ = GenericSubtitlesFile(
scaled_subs,
sub_format=subs.sub_format,
encoding=subs.encoding,
styles=subs.styles
)
return self
def transform(self, *_):

libs/pygments/__init__.py Normal file

@ -0,0 +1,84 @@
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats is added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments master branch:
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from io import StringIO, BytesIO
__version__ = '2.8.1'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method get_tokens' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method format' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
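The canonical three-step call, for reference (PythonLexer and HtmlFormatter are standard Pygments classes):
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

print(highlight('print("hi")', PythonLexer(), HtmlFormatter()))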

libs/pygments/__main__.py Normal file

@ -0,0 +1,17 @@
"""
pygments.__main__
~~~~~~~~~~~~~~~~~
Main entry point for ``python -m pygments``.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import pygments.cmdline
try:
sys.exit(pygments.cmdline.main(sys.argv))
except KeyboardInterrupt:
sys.exit(1)

libs/pygments/cmdline.py Normal file

@ -0,0 +1,602 @@
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import shutil
import argparse
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding, \
UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
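For example, the comma/colon grammar these helpers accept:
_parse_options(['style=monokai,linenos'])
# -> {'style': 'monokai', 'linenos': True}

_parse_filters(['keywordcase:case=upper'])
# -> [('keywordcase', {'case': 'upper'})]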
def _print_help(what, name):
try:
if what == 'lexer':
cls = get_lexer_by_name(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
return 0
except (AttributeError, ValueError):
print("%s not found!" % what, file=sys.stderr)
return 1
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main_inner(parser, argns):
if argns.help:
parser.print_help()
return 0
if argns.V:
print('Pygments version %s, (c) 2006-2021 by Georg Brandl, Matthäus '
'Chajdas and contributors.' % __version__)
return 0
def is_only_option(opt):
return not any(v for (k, v) in vars(argns).items() if k != opt)
# handle ``pygmentize -L``
if argns.L is not None:
if not is_only_option('L'):
parser.print_help(sys.stderr)
return 2
# print version
main(['', '-V'])
allowed_types = {'lexer', 'formatter', 'filter', 'style'}
largs = [arg.rstrip('s') for arg in argns.L]
if any(arg not in allowed_types for arg in largs):
parser.print_help(sys.stderr)
return 0
if not largs:
largs = allowed_types
for arg in largs:
_print_list(arg)
return 0
# handle ``pygmentize -H``
if argns.H:
if not is_only_option('H'):
parser.print_help(sys.stderr)
return 2
what, name = argns.H
if what not in ('lexer', 'formatter', 'filter'):
parser.print_help(sys.stderr)
return 2
return _print_help(what, name)
# parse -O options
parsed_opts = _parse_options(argns.O or [])
# parse -P options
for p_opt in argns.P or []:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
if argns.N:
lexer = find_lexer_class_for_filename(argns.N)
if lexer is None:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -C``
if argns.C:
inp = sys.stdin.buffer.read()
try:
lexer = guess_lexer(inp, inencoding=inencoding)
except ClassNotFound:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = argns.S
a_opt = argns.a
if S_opt is not None:
f_opt = argns.f
if not f_opt:
parser.print_help(sys.stderr)
return 2
if argns.l or argns.INPUTFILE:
parser.print_help(sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
print(fmter.get_style_defs(a_opt or ''))
return 0
# if no -S is given, -a is not allowed
if argns.a is not None:
parser.print_help(sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(argns.F or [])
# -x: allow custom (eXternal) lexers and formatters
allow_custom_lexer_formatter = bool(argns.x)
# select lexer
lexer = None
# given by name?
lexername = argns.l
if lexername:
# custom lexer, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in lexername:
try:
filename = None
name = None
if ':' in lexername:
filename, name = lexername.rsplit(':', 1)
if '.py' in name:
# This can happen on Windows: If the lexername is
# C:\lexer.py -- return to normal load path in that case
name = None
if filename and name:
lexer = load_lexer_from_file(filename, name,
**parsed_opts)
else:
lexer = load_lexer_from_file(lexername, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
lexer = get_lexer_by_name(lexername, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if argns.INPUTFILE:
if argns.s:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 2
infn = argns.INPUTFILE
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if argns.g:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif not argns.s: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
code = sys.stdin.buffer.read() # use .buffer to get a binary stream
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else: # -s option needs a lexer with -l
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 2
# process filters
for fname, fopts in F_opts:
try:
lexer.add_filter(fname, **fopts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
# select formatter
outfn = argns.o
fmter = argns.f
if fmter:
# custom formatter, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in fmter:
try:
filename = None
name = None
if ':' in fmter:
# Same logic as above for custom lexer
filename, name = fmter.rsplit(':', 1)
if '.py' in name:
name = None
if filename and name:
fmter = load_formatter_from_file(filename, name,
**parsed_opts)
else:
fmter = load_formatter_from_file(fmter, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
if '256' in os.environ.get('TERM', ''):
fmter = Terminal256Formatter(**parsed_opts)
else:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout.buffer
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
if not argns.s:
# process whole input as per normal...
try:
highlight(code, lexer, fmter, outfile)
finally:
if outfn:
outfile.close()
return 0
else:
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
line = sys.stdin.buffer.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
return 0
except KeyboardInterrupt: # pragma: no cover
return 0
finally:
if outfn:
outfile.close()
class HelpFormatter(argparse.HelpFormatter):
def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
if width is None:
try:
width = shutil.get_terminal_size().columns - 2
except Exception:
pass
argparse.HelpFormatter.__init__(self, prog, indent_increment,
max_help_position, width)
def main(args=sys.argv):
"""
Main command line entry point.
"""
desc = "Highlight an input file and write the result to an output file."
parser = argparse.ArgumentParser(description=desc, add_help=False,
formatter_class=HelpFormatter)
operation = parser.add_argument_group('Main operation')
lexersel = operation.add_mutually_exclusive_group()
lexersel.add_argument(
'-l', metavar='LEXER',
help='Specify the lexer to use. (Query names with -L.) If not '
'given and -g is not present, the lexer is guessed from the filename.')
lexersel.add_argument(
'-g', action='store_true',
help='Guess the lexer from the file contents, or pass through '
'as plain text if nothing can be guessed.')
operation.add_argument(
'-F', metavar='FILTER[:options]', action='append',
help='Add a filter to the token stream. (Query names with -L.) '
'Filter options are given after a colon if necessary.')
operation.add_argument(
'-f', metavar='FORMATTER',
help='Specify the formatter to use. (Query names with -L.) '
'If not given, the formatter is guessed from the output filename, '
'and defaults to the terminal formatter if the output is to the '
'terminal or an unknown file extension.')
operation.add_argument(
'-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
help='Give options to the lexer and formatter as a comma-separated '
'list of key-value pairs. '
'Example: `-O bg=light,python=cool`.')
operation.add_argument(
'-P', metavar='OPTION=value', action='append',
help='Give a single option to the lexer and formatter - with this '
'you can pass options whose value contains commas and equal signs. '
'Example: `-P "heading=Pygments, the Python highlighter"`.')
operation.add_argument(
'-o', metavar='OUTPUTFILE',
help='Where to write the output. Defaults to standard output.')
operation.add_argument(
'INPUTFILE', nargs='?',
help='Where to read the input. Defaults to standard input.')
flags = parser.add_argument_group('Operation flags')
flags.add_argument(
'-v', action='store_true',
help='Print a detailed traceback on unhandled exceptions, which '
'is useful for debugging and bug reports.')
flags.add_argument(
'-s', action='store_true',
help='Process lines one at a time until EOF, rather than waiting to '
'process the entire file. This only works for stdin, only for lexers '
'with no line-spanning constructs, and is intended for streaming '
'input such as you get from `tail -f`. '
'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
flags.add_argument(
'-x', action='store_true',
help='Allow custom lexers and formatters to be loaded from a .py file '
'relative to the current working directory. For example, '
'`-l ./customlexer.py -x`. By default, this option expects a file '
'with a class named CustomLexer or CustomFormatter; you can also '
'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
'Users should be very careful not to use this option with untrusted '
'files, because it will import and run them.')
special_modes_group = parser.add_argument_group(
'Special modes - do not do any highlighting')
special_modes = special_modes_group.add_mutually_exclusive_group()
special_modes.add_argument(
'-S', metavar='STYLE -f formatter',
help='Print style definitions for STYLE for a formatter '
'given with -f. The argument given by -a is formatter '
'dependent.')
special_modes.add_argument(
'-L', nargs='*', metavar='WHAT',
help='List lexers, formatters, styles or filters -- '
'give additional arguments for the thing(s) you want to list '
'(e.g. "styles"), or omit them to list everything.')
special_modes.add_argument(
'-N', metavar='FILENAME',
help='Guess and print out a lexer name based solely on the given '
'filename. Does not take input or highlight anything. If no specific '
'lexer can be determined, "text" is printed.')
special_modes.add_argument(
'-C', action='store_true',
help='Like -N, but print out a lexer name based solely on '
'a given content from standard input.')
special_modes.add_argument(
'-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
help='Print detailed help for the object <name> of type <type>, '
'where <type> is one of "lexer", "formatter" or "filter".')
special_modes.add_argument(
'-V', action='store_true',
help='Print the package version.')
special_modes.add_argument(
'-h', '--help', action='store_true',
help='Print this help.')
special_modes_group.add_argument(
'-a', metavar='ARG',
help='Formatter-specific additional argument for the -S (print '
'style sheet) mode.')
argns = parser.parse_args(args[1:])
try:
return main_inner(parser, argns)
except Exception:
if argns.v:
print(file=sys.stderr)
print('*' * 65, file=sys.stderr)
print('An unhandled exception occurred while highlighting.',
file=sys.stderr)
print('Please report the whole traceback to the issue tracker at',
file=sys.stderr)
print('<https://github.com/pygments/pygments/issues>.',
file=sys.stderr)
print('*' * 65, file=sys.stderr)
print(file=sys.stderr)
raise
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
print('*** If this is a bug you want to report, please rerun with -v.',
file=sys.stderr)
return 1
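# Programmatic use is a sketch like the following ('example.py' is a
# hypothetical input file; the console entry point normally passes sys.argv):
#   import sys
#   sys.exit(main(['pygmentize', '-l', 'python', '-f', 'terminal', 'example.py']))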

libs/pygments/console.py Normal file

@ -0,0 +1,70 @@
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "red", "green", "yellow", "blue",
"magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
"brightmagenta", "brightcyan", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%im" % (60 + x)
x += 1
del d, l, x
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
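# Minimal demo of the helpers above (an illustrative sketch, not part of
# upstream pygments); runs only when the module is executed directly:
if __name__ == '__main__':
    # colorize() wraps the text in a single color and resets afterwards.
    print(colorize('green', 'ok') + ' back to the default color')
    # ansiformat() stacks attributes: *...* is bold, _..._ is underlined.
    print(ansiformat('*_yellow_*', 'bold underlined yellow'))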

libs/pygments/filter.py Normal file

@ -0,0 +1,71 @@
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
yield from filter_.filter(lexer, stream)
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
def simplefilter(f):
"""
Decorator that converts a function into a filter::
@simplefilter
def lowercase(self, lexer, stream, options):
for ttype, value in stream:
yield ttype, value.lower()
"""
return type(f.__name__, (FunctionFilter,), {
'__module__': getattr(f, '__module__'),
'__doc__': f.__doc__,
'function': f,
})
class Filter:
"""
Default filter. Subclass this class or use the `simplefilter`
decorator to create own filters.
"""
def __init__(self, **options):
self.options = options
def filter(self, lexer, stream):
raise NotImplementedError()
class FunctionFilter(Filter):
"""
Abstract class used by `simplefilter` to create simple
function filters on the fly. The `simplefilter` decorator
automatically creates subclasses of this class for
functions passed to it.
"""
function = None
def __init__(self, **options):
if not hasattr(self, 'function'):
raise TypeError('%r used without bound function' %
self.__class__.__name__)
Filter.__init__(self, **options)
def filter(self, lexer, stream):
# pylint: disable=not-callable
yield from self.function(lexer, stream, self.options)
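# Sketch of the pieces above working together (illustrative, not part of
# upstream pygments; PythonLexer is just an example source of tokens):
#
#   from pygments.lexers import PythonLexer
#
#   @simplefilter
#   def uppercase(self, lexer, stream, options):
#       for ttype, value in stream:
#           yield ttype, value.upper()
#
#   tokens = PythonLexer().get_tokens('print("hi")')
#   for ttype, value in apply_filters(tokens, [uppercase()]):
#       print(ttype, repr(value))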

libs/pygments/filters/__init__.py Normal file

@ -0,0 +1,937 @@
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""Return a generator of all filter names."""
yield from FILTERS
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
yield from _replace_special(ttype, value, regex, Comment.Special)
else:
yield ttype, value
class SymbolFilter(Filter):
"""Convert mathematical symbols such as \\<longrightarrow> in Isabelle
or \\longrightarrow in LaTeX into Unicode characters.
This is mostly useful for HTML or console output when you want to
approximate the source rendering you'd see in an IDE.
Options accepted:
`lang` : string
The symbol language. Must be one of ``'isabelle'`` or
``'latex'``. The default is ``'isabelle'``.
"""
latex_symbols = {
'\\alpha' : '\U000003b1',
'\\beta' : '\U000003b2',
'\\gamma' : '\U000003b3',
'\\delta' : '\U000003b4',
'\\varepsilon' : '\U000003b5',
'\\zeta' : '\U000003b6',
'\\eta' : '\U000003b7',
'\\vartheta' : '\U000003b8',
'\\iota' : '\U000003b9',
'\\kappa' : '\U000003ba',
'\\lambda' : '\U000003bb',
'\\mu' : '\U000003bc',
'\\nu' : '\U000003bd',
'\\xi' : '\U000003be',
'\\pi' : '\U000003c0',
'\\varrho' : '\U000003c1',
'\\sigma' : '\U000003c3',
'\\tau' : '\U000003c4',
'\\upsilon' : '\U000003c5',
'\\varphi' : '\U000003c6',
'\\chi' : '\U000003c7',
'\\psi' : '\U000003c8',
'\\omega' : '\U000003c9',
'\\Gamma' : '\U00000393',
'\\Delta' : '\U00000394',
'\\Theta' : '\U00000398',
'\\Lambda' : '\U0000039b',
'\\Xi' : '\U0000039e',
'\\Pi' : '\U000003a0',
'\\Sigma' : '\U000003a3',
'\\Upsilon' : '\U000003a5',
'\\Phi' : '\U000003a6',
'\\Psi' : '\U000003a8',
'\\Omega' : '\U000003a9',
'\\leftarrow' : '\U00002190',
'\\longleftarrow' : '\U000027f5',
'\\rightarrow' : '\U00002192',
'\\longrightarrow' : '\U000027f6',
'\\Leftarrow' : '\U000021d0',
'\\Longleftarrow' : '\U000027f8',
'\\Rightarrow' : '\U000021d2',
'\\Longrightarrow' : '\U000027f9',
'\\leftrightarrow' : '\U00002194',
'\\longleftrightarrow' : '\U000027f7',
'\\Leftrightarrow' : '\U000021d4',
'\\Longleftrightarrow' : '\U000027fa',
'\\mapsto' : '\U000021a6',
'\\longmapsto' : '\U000027fc',
'\\relbar' : '\U00002500',
'\\Relbar' : '\U00002550',
'\\hookleftarrow' : '\U000021a9',
'\\hookrightarrow' : '\U000021aa',
'\\leftharpoondown' : '\U000021bd',
'\\rightharpoondown' : '\U000021c1',
'\\leftharpoonup' : '\U000021bc',
'\\rightharpoonup' : '\U000021c0',
'\\rightleftharpoons' : '\U000021cc',
'\\leadsto' : '\U0000219d',
'\\downharpoonleft' : '\U000021c3',
'\\downharpoonright' : '\U000021c2',
'\\upharpoonleft' : '\U000021bf',
'\\upharpoonright' : '\U000021be',
'\\restriction' : '\U000021be',
'\\uparrow' : '\U00002191',
'\\Uparrow' : '\U000021d1',
'\\downarrow' : '\U00002193',
'\\Downarrow' : '\U000021d3',
'\\updownarrow' : '\U00002195',
'\\Updownarrow' : '\U000021d5',
'\\langle' : '\U000027e8',
'\\rangle' : '\U000027e9',
'\\lceil' : '\U00002308',
'\\rceil' : '\U00002309',
'\\lfloor' : '\U0000230a',
'\\rfloor' : '\U0000230b',
'\\flqq' : '\U000000ab',
'\\frqq' : '\U000000bb',
'\\bot' : '\U000022a5',
'\\top' : '\U000022a4',
'\\wedge' : '\U00002227',
'\\bigwedge' : '\U000022c0',
'\\vee' : '\U00002228',
'\\bigvee' : '\U000022c1',
'\\forall' : '\U00002200',
'\\exists' : '\U00002203',
'\\nexists' : '\U00002204',
'\\neg' : '\U000000ac',
'\\Box' : '\U000025a1',
'\\Diamond' : '\U000025c7',
'\\vdash' : '\U000022a2',
'\\models' : '\U000022a8',
'\\dashv' : '\U000022a3',
'\\surd' : '\U0000221a',
'\\le' : '\U00002264',
'\\ge' : '\U00002265',
'\\ll' : '\U0000226a',
'\\gg' : '\U0000226b',
'\\lesssim' : '\U00002272',
'\\gtrsim' : '\U00002273',
'\\lessapprox' : '\U00002a85',
'\\gtrapprox' : '\U00002a86',
'\\in' : '\U00002208',
'\\notin' : '\U00002209',
'\\subset' : '\U00002282',
'\\supset' : '\U00002283',
'\\subseteq' : '\U00002286',
'\\supseteq' : '\U00002287',
'\\sqsubset' : '\U0000228f',
'\\sqsupset' : '\U00002290',
'\\sqsubseteq' : '\U00002291',
'\\sqsupseteq' : '\U00002292',
'\\cap' : '\U00002229',
'\\bigcap' : '\U000022c2',
'\\cup' : '\U0000222a',
'\\bigcup' : '\U000022c3',
'\\sqcup' : '\U00002294',
'\\bigsqcup' : '\U00002a06',
'\\sqcap' : '\U00002293',
'\\Bigsqcap' : '\U00002a05',
'\\setminus' : '\U00002216',
'\\propto' : '\U0000221d',
'\\uplus' : '\U0000228e',
'\\bigplus' : '\U00002a04',
'\\sim' : '\U0000223c',
'\\doteq' : '\U00002250',
'\\simeq' : '\U00002243',
'\\approx' : '\U00002248',
'\\asymp' : '\U0000224d',
'\\cong' : '\U00002245',
'\\equiv' : '\U00002261',
'\\Join' : '\U000022c8',
'\\bowtie' : '\U00002a1d',
'\\prec' : '\U0000227a',
'\\succ' : '\U0000227b',
'\\preceq' : '\U0000227c',
'\\succeq' : '\U0000227d',
'\\parallel' : '\U00002225',
'\\mid' : '\U000000a6',
'\\pm' : '\U000000b1',
'\\mp' : '\U00002213',
'\\times' : '\U000000d7',
'\\div' : '\U000000f7',
'\\cdot' : '\U000022c5',
'\\star' : '\U000022c6',
'\\circ' : '\U00002218',
'\\dagger' : '\U00002020',
'\\ddagger' : '\U00002021',
'\\lhd' : '\U000022b2',
'\\rhd' : '\U000022b3',
'\\unlhd' : '\U000022b4',
'\\unrhd' : '\U000022b5',
'\\triangleleft' : '\U000025c3',
'\\triangleright' : '\U000025b9',
'\\triangle' : '\U000025b3',
'\\triangleq' : '\U0000225c',
'\\oplus' : '\U00002295',
'\\bigoplus' : '\U00002a01',
'\\otimes' : '\U00002297',
'\\bigotimes' : '\U00002a02',
'\\odot' : '\U00002299',
'\\bigodot' : '\U00002a00',
'\\ominus' : '\U00002296',
'\\oslash' : '\U00002298',
'\\dots' : '\U00002026',
'\\cdots' : '\U000022ef',
'\\sum' : '\U00002211',
'\\prod' : '\U0000220f',
'\\coprod' : '\U00002210',
'\\infty' : '\U0000221e',
'\\int' : '\U0000222b',
'\\oint' : '\U0000222e',
'\\clubsuit' : '\U00002663',
'\\diamondsuit' : '\U00002662',
'\\heartsuit' : '\U00002661',
'\\spadesuit' : '\U00002660',
'\\aleph' : '\U00002135',
'\\emptyset' : '\U00002205',
'\\nabla' : '\U00002207',
'\\partial' : '\U00002202',
'\\flat' : '\U0000266d',
'\\natural' : '\U0000266e',
'\\sharp' : '\U0000266f',
'\\angle' : '\U00002220',
'\\copyright' : '\U000000a9',
'\\textregistered' : '\U000000ae',
'\\textonequarter' : '\U000000bc',
'\\textonehalf' : '\U000000bd',
'\\textthreequarters' : '\U000000be',
'\\textordfeminine' : '\U000000aa',
'\\textordmasculine' : '\U000000ba',
'\\euro' : '\U000020ac',
'\\pounds' : '\U000000a3',
'\\yen' : '\U000000a5',
'\\textcent' : '\U000000a2',
'\\textcurrency' : '\U000000a4',
'\\textdegree' : '\U000000b0',
}
isabelle_symbols = {
'\\<zero>' : '\U0001d7ec',
'\\<one>' : '\U0001d7ed',
'\\<two>' : '\U0001d7ee',
'\\<three>' : '\U0001d7ef',
'\\<four>' : '\U0001d7f0',
'\\<five>' : '\U0001d7f1',
'\\<six>' : '\U0001d7f2',
'\\<seven>' : '\U0001d7f3',
'\\<eight>' : '\U0001d7f4',
'\\<nine>' : '\U0001d7f5',
'\\<A>' : '\U0001d49c',
'\\<B>' : '\U0000212c',
'\\<C>' : '\U0001d49e',
'\\<D>' : '\U0001d49f',
'\\<E>' : '\U00002130',
'\\<F>' : '\U00002131',
'\\<G>' : '\U0001d4a2',
'\\<H>' : '\U0000210b',
'\\<I>' : '\U00002110',
'\\<J>' : '\U0001d4a5',
'\\<K>' : '\U0001d4a6',
'\\<L>' : '\U00002112',
'\\<M>' : '\U00002133',
'\\<N>' : '\U0001d4a9',
'\\<O>' : '\U0001d4aa',
'\\<P>' : '\U0001d4ab',
'\\<Q>' : '\U0001d4ac',
'\\<R>' : '\U0000211b',
'\\<S>' : '\U0001d4ae',
'\\<T>' : '\U0001d4af',
'\\<U>' : '\U0001d4b0',
'\\<V>' : '\U0001d4b1',
'\\<W>' : '\U0001d4b2',
'\\<X>' : '\U0001d4b3',
'\\<Y>' : '\U0001d4b4',
'\\<Z>' : '\U0001d4b5',
'\\<a>' : '\U0001d5ba',
'\\<b>' : '\U0001d5bb',
'\\<c>' : '\U0001d5bc',
'\\<d>' : '\U0001d5bd',
'\\<e>' : '\U0001d5be',
'\\<f>' : '\U0001d5bf',
'\\<g>' : '\U0001d5c0',
'\\<h>' : '\U0001d5c1',
'\\<i>' : '\U0001d5c2',
'\\<j>' : '\U0001d5c3',
'\\<k>' : '\U0001d5c4',
'\\<l>' : '\U0001d5c5',
'\\<m>' : '\U0001d5c6',
'\\<n>' : '\U0001d5c7',
'\\<o>' : '\U0001d5c8',
'\\<p>' : '\U0001d5c9',
'\\<q>' : '\U0001d5ca',
'\\<r>' : '\U0001d5cb',
'\\<s>' : '\U0001d5cc',
'\\<t>' : '\U0001d5cd',
'\\<u>' : '\U0001d5ce',
'\\<v>' : '\U0001d5cf',
'\\<w>' : '\U0001d5d0',
'\\<x>' : '\U0001d5d1',
'\\<y>' : '\U0001d5d2',
'\\<z>' : '\U0001d5d3',
'\\<AA>' : '\U0001d504',
'\\<BB>' : '\U0001d505',
'\\<CC>' : '\U0000212d',
'\\<DD>' : '\U0001d507',
'\\<EE>' : '\U0001d508',
'\\<FF>' : '\U0001d509',
'\\<GG>' : '\U0001d50a',
'\\<HH>' : '\U0000210c',
'\\<II>' : '\U00002111',
'\\<JJ>' : '\U0001d50d',
'\\<KK>' : '\U0001d50e',
'\\<LL>' : '\U0001d50f',
'\\<MM>' : '\U0001d510',
'\\<NN>' : '\U0001d511',
'\\<OO>' : '\U0001d512',
'\\<PP>' : '\U0001d513',
'\\<QQ>' : '\U0001d514',
'\\<RR>' : '\U0000211c',
'\\<SS>' : '\U0001d516',
'\\<TT>' : '\U0001d517',
'\\<UU>' : '\U0001d518',
'\\<VV>' : '\U0001d519',
'\\<WW>' : '\U0001d51a',
'\\<XX>' : '\U0001d51b',
'\\<YY>' : '\U0001d51c',
'\\<ZZ>' : '\U00002128',
'\\<aa>' : '\U0001d51e',
'\\<bb>' : '\U0001d51f',
'\\<cc>' : '\U0001d520',
'\\<dd>' : '\U0001d521',
'\\<ee>' : '\U0001d522',
'\\<ff>' : '\U0001d523',
'\\<gg>' : '\U0001d524',
'\\<hh>' : '\U0001d525',
'\\<ii>' : '\U0001d526',
'\\<jj>' : '\U0001d527',
'\\<kk>' : '\U0001d528',
'\\<ll>' : '\U0001d529',
'\\<mm>' : '\U0001d52a',
'\\<nn>' : '\U0001d52b',
'\\<oo>' : '\U0001d52c',
'\\<pp>' : '\U0001d52d',
'\\<qq>' : '\U0001d52e',
'\\<rr>' : '\U0001d52f',
'\\<ss>' : '\U0001d530',
'\\<tt>' : '\U0001d531',
'\\<uu>' : '\U0001d532',
'\\<vv>' : '\U0001d533',
'\\<ww>' : '\U0001d534',
'\\<xx>' : '\U0001d535',
'\\<yy>' : '\U0001d536',
'\\<zz>' : '\U0001d537',
'\\<alpha>' : '\U000003b1',
'\\<beta>' : '\U000003b2',
'\\<gamma>' : '\U000003b3',
'\\<delta>' : '\U000003b4',
'\\<epsilon>' : '\U000003b5',
'\\<zeta>' : '\U000003b6',
'\\<eta>' : '\U000003b7',
'\\<theta>' : '\U000003b8',
'\\<iota>' : '\U000003b9',
'\\<kappa>' : '\U000003ba',
'\\<lambda>' : '\U000003bb',
'\\<mu>' : '\U000003bc',
'\\<nu>' : '\U000003bd',
'\\<xi>' : '\U000003be',
'\\<pi>' : '\U000003c0',
'\\<rho>' : '\U000003c1',
'\\<sigma>' : '\U000003c3',
'\\<tau>' : '\U000003c4',
'\\<upsilon>' : '\U000003c5',
'\\<phi>' : '\U000003c6',
'\\<chi>' : '\U000003c7',
'\\<psi>' : '\U000003c8',
'\\<omega>' : '\U000003c9',
'\\<Gamma>' : '\U00000393',
'\\<Delta>' : '\U00000394',
'\\<Theta>' : '\U00000398',
'\\<Lambda>' : '\U0000039b',
'\\<Xi>' : '\U0000039e',
'\\<Pi>' : '\U000003a0',
'\\<Sigma>' : '\U000003a3',
'\\<Upsilon>' : '\U000003a5',
'\\<Phi>' : '\U000003a6',
'\\<Psi>' : '\U000003a8',
'\\<Omega>' : '\U000003a9',
'\\<bool>' : '\U0001d539',
'\\<complex>' : '\U00002102',
'\\<nat>' : '\U00002115',
'\\<rat>' : '\U0000211a',
'\\<real>' : '\U0000211d',
'\\<int>' : '\U00002124',
'\\<leftarrow>' : '\U00002190',
'\\<longleftarrow>' : '\U000027f5',
'\\<rightarrow>' : '\U00002192',
'\\<longrightarrow>' : '\U000027f6',
'\\<Leftarrow>' : '\U000021d0',
'\\<Longleftarrow>' : '\U000027f8',
'\\<Rightarrow>' : '\U000021d2',
'\\<Longrightarrow>' : '\U000027f9',
'\\<leftrightarrow>' : '\U00002194',
'\\<longleftrightarrow>' : '\U000027f7',
'\\<Leftrightarrow>' : '\U000021d4',
'\\<Longleftrightarrow>' : '\U000027fa',
'\\<mapsto>' : '\U000021a6',
'\\<longmapsto>' : '\U000027fc',
'\\<midarrow>' : '\U00002500',
'\\<Midarrow>' : '\U00002550',
'\\<hookleftarrow>' : '\U000021a9',
'\\<hookrightarrow>' : '\U000021aa',
'\\<leftharpoondown>' : '\U000021bd',
'\\<rightharpoondown>' : '\U000021c1',
'\\<leftharpoonup>' : '\U000021bc',
'\\<rightharpoonup>' : '\U000021c0',
'\\<rightleftharpoons>' : '\U000021cc',
'\\<leadsto>' : '\U0000219d',
'\\<downharpoonleft>' : '\U000021c3',
'\\<downharpoonright>' : '\U000021c2',
'\\<upharpoonleft>' : '\U000021bf',
'\\<upharpoonright>' : '\U000021be',
'\\<restriction>' : '\U000021be',
'\\<Colon>' : '\U00002237',
'\\<up>' : '\U00002191',
'\\<Up>' : '\U000021d1',
'\\<down>' : '\U00002193',
'\\<Down>' : '\U000021d3',
'\\<updown>' : '\U00002195',
'\\<Updown>' : '\U000021d5',
'\\<langle>' : '\U000027e8',
'\\<rangle>' : '\U000027e9',
'\\<lceil>' : '\U00002308',
'\\<rceil>' : '\U00002309',
'\\<lfloor>' : '\U0000230a',
'\\<rfloor>' : '\U0000230b',
'\\<lparr>' : '\U00002987',
'\\<rparr>' : '\U00002988',
'\\<lbrakk>' : '\U000027e6',
'\\<rbrakk>' : '\U000027e7',
'\\<lbrace>' : '\U00002983',
'\\<rbrace>' : '\U00002984',
'\\<guillemotleft>' : '\U000000ab',
'\\<guillemotright>' : '\U000000bb',
'\\<bottom>' : '\U000022a5',
'\\<top>' : '\U000022a4',
'\\<and>' : '\U00002227',
'\\<And>' : '\U000022c0',
'\\<or>' : '\U00002228',
'\\<Or>' : '\U000022c1',
'\\<forall>' : '\U00002200',
'\\<exists>' : '\U00002203',
'\\<nexists>' : '\U00002204',
'\\<not>' : '\U000000ac',
'\\<box>' : '\U000025a1',
'\\<diamond>' : '\U000025c7',
'\\<turnstile>' : '\U000022a2',
'\\<Turnstile>' : '\U000022a8',
'\\<tturnstile>' : '\U000022a9',
'\\<TTurnstile>' : '\U000022ab',
'\\<stileturn>' : '\U000022a3',
'\\<surd>' : '\U0000221a',
'\\<le>' : '\U00002264',
'\\<ge>' : '\U00002265',
'\\<lless>' : '\U0000226a',
'\\<ggreater>' : '\U0000226b',
'\\<lesssim>' : '\U00002272',
'\\<greatersim>' : '\U00002273',
'\\<lessapprox>' : '\U00002a85',
'\\<greaterapprox>' : '\U00002a86',
'\\<in>' : '\U00002208',
'\\<notin>' : '\U00002209',
'\\<subset>' : '\U00002282',
'\\<supset>' : '\U00002283',
'\\<subseteq>' : '\U00002286',
'\\<supseteq>' : '\U00002287',
'\\<sqsubset>' : '\U0000228f',
'\\<sqsupset>' : '\U00002290',
'\\<sqsubseteq>' : '\U00002291',
'\\<sqsupseteq>' : '\U00002292',
'\\<inter>' : '\U00002229',
'\\<Inter>' : '\U000022c2',
'\\<union>' : '\U0000222a',
'\\<Union>' : '\U000022c3',
'\\<squnion>' : '\U00002294',
'\\<Squnion>' : '\U00002a06',
'\\<sqinter>' : '\U00002293',
'\\<Sqinter>' : '\U00002a05',
'\\<setminus>' : '\U00002216',
'\\<propto>' : '\U0000221d',
'\\<uplus>' : '\U0000228e',
'\\<Uplus>' : '\U00002a04',
'\\<noteq>' : '\U00002260',
'\\<sim>' : '\U0000223c',
'\\<doteq>' : '\U00002250',
'\\<simeq>' : '\U00002243',
'\\<approx>' : '\U00002248',
'\\<asymp>' : '\U0000224d',
'\\<cong>' : '\U00002245',
'\\<smile>' : '\U00002323',
'\\<equiv>' : '\U00002261',
'\\<frown>' : '\U00002322',
'\\<Join>' : '\U000022c8',
'\\<bowtie>' : '\U00002a1d',
'\\<prec>' : '\U0000227a',
'\\<succ>' : '\U0000227b',
'\\<preceq>' : '\U0000227c',
'\\<succeq>' : '\U0000227d',
'\\<parallel>' : '\U00002225',
'\\<bar>' : '\U000000a6',
'\\<plusminus>' : '\U000000b1',
'\\<minusplus>' : '\U00002213',
'\\<times>' : '\U000000d7',
'\\<div>' : '\U000000f7',
'\\<cdot>' : '\U000022c5',
'\\<star>' : '\U000022c6',
'\\<bullet>' : '\U00002219',
'\\<circ>' : '\U00002218',
'\\<dagger>' : '\U00002020',
'\\<ddagger>' : '\U00002021',
'\\<lhd>' : '\U000022b2',
'\\<rhd>' : '\U000022b3',
'\\<unlhd>' : '\U000022b4',
'\\<unrhd>' : '\U000022b5',
'\\<triangleleft>' : '\U000025c3',
'\\<triangleright>' : '\U000025b9',
'\\<triangle>' : '\U000025b3',
'\\<triangleq>' : '\U0000225c',
'\\<oplus>' : '\U00002295',
'\\<Oplus>' : '\U00002a01',
'\\<otimes>' : '\U00002297',
'\\<Otimes>' : '\U00002a02',
'\\<odot>' : '\U00002299',
'\\<Odot>' : '\U00002a00',
'\\<ominus>' : '\U00002296',
'\\<oslash>' : '\U00002298',
'\\<dots>' : '\U00002026',
'\\<cdots>' : '\U000022ef',
'\\<Sum>' : '\U00002211',
'\\<Prod>' : '\U0000220f',
'\\<Coprod>' : '\U00002210',
'\\<infinity>' : '\U0000221e',
'\\<integral>' : '\U0000222b',
'\\<ointegral>' : '\U0000222e',
'\\<clubsuit>' : '\U00002663',
'\\<diamondsuit>' : '\U00002662',
'\\<heartsuit>' : '\U00002661',
'\\<spadesuit>' : '\U00002660',
'\\<aleph>' : '\U00002135',
'\\<emptyset>' : '\U00002205',
'\\<nabla>' : '\U00002207',
'\\<partial>' : '\U00002202',
'\\<flat>' : '\U0000266d',
'\\<natural>' : '\U0000266e',
'\\<sharp>' : '\U0000266f',
'\\<angle>' : '\U00002220',
'\\<copyright>' : '\U000000a9',
'\\<registered>' : '\U000000ae',
'\\<hyphen>' : '\U000000ad',
'\\<inverse>' : '\U000000af',
'\\<onequarter>' : '\U000000bc',
'\\<onehalf>' : '\U000000bd',
'\\<threequarters>' : '\U000000be',
'\\<ordfeminine>' : '\U000000aa',
'\\<ordmasculine>' : '\U000000ba',
'\\<section>' : '\U000000a7',
'\\<paragraph>' : '\U000000b6',
'\\<exclamdown>' : '\U000000a1',
'\\<questiondown>' : '\U000000bf',
'\\<euro>' : '\U000020ac',
'\\<pounds>' : '\U000000a3',
'\\<yen>' : '\U000000a5',
'\\<cent>' : '\U000000a2',
'\\<currency>' : '\U000000a4',
'\\<degree>' : '\U000000b0',
'\\<amalg>' : '\U00002a3f',
'\\<mho>' : '\U00002127',
'\\<lozenge>' : '\U000025ca',
'\\<wp>' : '\U00002118',
'\\<wrong>' : '\U00002240',
'\\<struct>' : '\U000022c4',
'\\<acute>' : '\U000000b4',
'\\<index>' : '\U00000131',
'\\<dieresis>' : '\U000000a8',
'\\<cedilla>' : '\U000000b8',
'\\<hungarumlaut>' : '\U000002dd',
'\\<some>' : '\U000003f5',
'\\<newline>' : '\U000023ce',
'\\<open>' : '\U00002039',
'\\<close>' : '\U0000203a',
'\\<here>' : '\U00002302',
'\\<^sub>' : '\U000021e9',
'\\<^sup>' : '\U000021e7',
'\\<^bold>' : '\U00002759',
'\\<^bsub>' : '\U000021d8',
'\\<^esub>' : '\U000021d9',
'\\<^bsup>' : '\U000021d7',
'\\<^esup>' : '\U000021d6',
}
lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
def __init__(self, **options):
Filter.__init__(self, **options)
lang = get_choice_opt(options, 'lang',
['isabelle', 'latex'], 'isabelle')
self.symbols = self.lang_map[lang]
def filter(self, lexer, stream):
for ttype, value in stream:
if value in self.symbols:
yield ttype, self.symbols[value]
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(str, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', '·'),
('tabs', '»'),
('newlines', '¶')]:
opt = options.get(name, False)
if isinstance(opt, str) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' ' * (tabsize - 1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
yield from _replace_special(ttype, value, regex, Whitespace,
replacefunc)
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""Merges consecutive tokens with the same token type in the output
stream of a lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
'symbols': SymbolFilter,
}
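# Usage sketch (illustrative, not part of upstream pygments): any entry in
# the FILTERS table above can be attached to a lexer by name, with keyword
# arguments forwarded to the filter's constructor:
#
#   from pygments.lexers import PythonLexer
#   lexer = PythonLexer()
#   lexer.add_filter('keywordcase', case='upper')
#   lexer.add_filter('whitespace', spaces=True)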

libs/pygments/formatter.py Normal file

@ -0,0 +1,94 @@
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
if isinstance(style, str):
return get_style_by_name(style)
return style
class Formatter:
"""
Converts a token stream to text.
Options accepted:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Name of the formatter
name = None
#: Shortcuts for the formatter
aliases = []
#: Filename (fnmatch) patterns for this formatter
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding in ('guess', 'chardet'):
# can happen for e.g. pygmentize -O encoding=guess
self.encoding = 'utf-8'
self.encoding = options.get('outencoding') or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return ''
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile)
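# A minimal subclass sketch (hypothetical class, not part of upstream
# pygments): a formatter only needs to implement format_unencoded();
# format() above adds the optional encoding wrapper.
#
#   class TokenCountFormatter(Formatter):
#       name = 'Token count'
#       aliases = ['tokencount']
#
#       def format_unencoded(self, tokensource, outfile):
#           outfile.write('%d tokens\n' % sum(1 for _ in tokensource))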

libs/pygments/formatters/__init__.py Normal file

@ -0,0 +1,153 @@
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
_formatter_cache = {} # classes by name
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_formatters(module_name):
"""Load a formatter (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for formatter_name in mod.__all__:
cls = getattr(mod, formatter_name)
_formatter_cache[cls.name] = cls
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
for info in FORMATTERS.values():
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
for _, formatter in find_plugin_formatters():
yield formatter
def find_formatter_class(alias):
"""Lookup a formatter by alias.
Returns None if not found.
"""
for module_name, name, aliases, _, _ in FORMATTERS.values():
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
return _formatter_cache[name]
for _, cls in find_plugin_formatters():
if alias in cls.aliases:
return cls
def get_formatter_by_name(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options)
def load_formatter_from_file(filename, formattername="CustomFormatter",
**options):
"""Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a Formatter class. By default, it expects the
class to be named CustomFormatter; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running exec on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(formattername, filename))
formatter_class = custom_namespace[formattername]
# And finally instantiate it with the options
return formatter_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom formatter: %s' % err)
def get_formatter_for_filename(fn, **options):
"""Lookup and instantiate a formatter by filename pattern.
Raises ClassNotFound if not found.
"""
fn = basename(fn)
for modname, name, _, filenames, _ in FORMATTERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if _fn_matches(fn, filename):
return cls(**options)
raise ClassNotFound("no formatter found for file name %r" % fn)
class _automodule(types.ModuleType):
"""Automatically import formatters."""
def __getattr__(self, name):
info = FORMATTERS.get(name)
if info:
_load_formatters(info[0])
cls = _formatter_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
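# Lookup sketch (illustrative, not part of upstream pygments; 'out.tex' and
# './mylib.py' are hypothetical):
#
#   from pygments.formatters import (get_formatter_by_name,
#                                    get_formatter_for_filename,
#                                    load_formatter_from_file)
#   html = get_formatter_by_name('html', linenos=True)
#   latex = get_formatter_for_filename('out.tex', style='colorful')
#   custom = load_formatter_from_file('./mylib.py', 'MyFormatter')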

libs/pygments/formatters/_mapping.py Normal file

@ -0,0 +1,82 @@
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter mapping definitions. This file is generated by itself. Every time
you change something on a builtin formatter definition, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
FORMATTERS = {
'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.')
}
if __name__ == '__main__': # pragma: no cover
import sys
import os
# lookup formatters
found_formatters = []
imports = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pygments.util import docstring_headline
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters%s.%s' % (
root[1:].replace('/', '.'), filename[:-3])
print(module_name)
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
formatter = getattr(module, formatter_name)
found_formatters.append(
'%r: %r' % (formatter_name,
(module_name,
formatter.name,
tuple(formatter.aliases),
tuple(formatter.filenames),
docstring_headline(formatter))))
# sort them to make the diff minimal
found_formatters.sort()
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
# replace CRLF with LF for Windows.
#
# Note that, originally, contributors should keep the LF line endings of
# the master repository, for example by using some kind of automatic EOL
# management, like the `EolExtension
# <https://www.mercurial-scm.org/wiki/EolExtension>`.
content = content.replace("\r\n", "\n")
header = content[:content.find('FORMATTERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
with open(__file__, 'w') as fp:
fp.write(header)
fp.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters))
fp.write(footer)
print('=== %d formatters processed.' % len(found_formatters))

libs/pygments/formatters/bbcode.py Normal file

@ -0,0 +1,108 @@
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag is usually shown with a monospace font (which this
formatter can do with the ``monofont`` option), and no spaces (which you
need for indentation) are removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
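# Usage sketch (illustrative, not part of upstream pygments):
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   print(highlight('x = 1', PythonLexer(), BBCodeFormatter(codetag=True)))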

libs/pygments/formatters/html.py Normal file

@ -0,0 +1,954 @@
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import functools
import os
import sys
import os.path
from io import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): '&amp;',
ord('<'): '&lt;',
ord('>'): '&gt;',
ord('"'): '&quot;',
ord("'"): '&#39;',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
def webify(color):
if color.startswith('calc') or color.startswith('var'):
return color
else:
return '#' + color
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
/*
generated by Pygments <https://pygments.org/>
Copyright 2006-2021 by the Pygments team.
Licensed under the BSD license, see LICENSE for details.
*/
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<!--
generated by Pygments <https://pygments.org/>
Copyright 2006-2021 by the Pygments team.
Licensed under the BSD license, see LICENSE for details.
-->
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`lineanchors` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags (as well as line number elements)
will not use CSS classes, but inline styles. This is not recommended
for larger pieces of code since it increases output size by quite a bit
(default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [e.g. no argument for the
`get_style_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
`filename`
A string used to generate a filename when rendering ``<pre>`` blocks,
for example if displaying source code.
.. versionadded:: 2.1
`wrapcode`
Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
by the HTML5 specification.
.. versionadded:: 2.4
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is iterated over directly and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
self.filename = self._decodeifneeded(options.get('filename', ''))
self.wrapcode = get_bool_opt(options, 'wrapcode', False)
self.span_element_openers = {}
if self.tagsfile:
if not ctags:
raise RuntimeError('The "ctags" package must to be installed '
'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _get_css_classes(self, ttype):
"""Return the CSS classes of this token type prefixed with the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls or ''
def _get_css_inline_styles(self, ttype):
"""Return the inline CSS styles for this token type."""
cclass = self.ttype2class.get(ttype)
while cclass is None:
ttype = ttype.parent
cclass = self.ttype2class.get(ttype)
return cclass or ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: %s; ' % webify(ndef['color'])
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: %s; ' % webify(ndef['bgcolor'])
if ndef['border']:
style += 'border: 1px solid %s; ' % webify(ndef['border'])
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
style_lines = []
style_lines.extend(self.get_linenos_style_defs())
style_lines.extend(self.get_background_style_defs(arg))
style_lines.extend(self.get_token_style_defs(arg))
return '\n'.join(style_lines)
def get_token_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
styles = [
(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.items()
if cls and style
]
styles.sort()
lines = [
'%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles
]
return lines
def get_background_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
bg_color = self.style.background_color
hl_color = self.style.highlight_color
lines = []
if arg and not self.nobackground and bg_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(
0, '%s{ background: %s;%s }' % (
prefix(''), bg_color, text_style
)
)
if hl_color is not None:
lines.insert(
0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
)
return lines
def get_linenos_style_defs(self):
lines = [
'pre { %s }' % self._pre_style,
'td.linenos .normal { %s }' % self._linenos_style,
'span.linenos { %s }' % self._linenos_style,
'td.linenos .special { %s }' % self._linenos_special_style,
'span.linenos.special { %s }' % self._linenos_special_style,
]
return lines
def get_css_prefix(self, arg):
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, str):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
return prefix
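    # For example, get_css_prefix(['div.syntax pre', 'pre.syntax']) returns a
    # callable for which prefix('kw') yields
    # 'div.syntax pre .kw, pre.syntax .kw', matching the docstring above.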
@property
def _pre_style(self):
return 'line-height: 125%;'
@property
def _linenos_style(self):
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_color,
self.style.line_number_background_color
)
@property
def _linenos_special_style(self):
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_special_color,
self.style.line_number_special_background_color
)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, '
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
with open(cssfilename, "w") as cf:
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
except OSError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title=self.title,
cssfile=self.cssfile,
encoding=self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title=self.title,
styledefs=self.get_style_defs('body'),
encoding=self.encoding))
yield from inner
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
lines = []
for i in range(fl, fl+lncount):
print_line = i % st == 0
special_line = sp and i % sp == 0
if print_line:
line = '%*d' % (mw, i)
if aln:
line = '<a href="#%s-%d">%s</a>' % (la, i, line)
else:
line = ' ' * mw
if nocls:
if special_line:
style = ' style="%s"' % self._linenos_special_style
else:
style = ' style="%s"' % self._linenos_style
else:
if special_line:
style = ' class="special"'
else:
style = ' class="normal"'
if style:
line = '<span%s>%s</span>' % (style, line)
lines.append(line)
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
yield 0, (
'<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">'
)
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
inner_lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(inner_lines) + num - 1))
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
for _, inner_line in inner_lines:
print_line = num % st == 0
special_line = sp and num % sp == 0
if print_line:
line = '%*d' % (mw, num)
else:
line = ' ' * mw
if nocls:
if special_line:
style = ' style="%s"' % self._linenos_special_style
else:
style = ' style="%s"' % self._linenos_style
else:
if special_line:
style = ' class="linenos special"'
else:
style = ' class="linenos"'
if style:
linenos = '<span%s>%s</span>' % (style, line)
else:
linenos = line
if aln:
yield 1, ('<a href="#%s-%d">%s</a>' % (la, num, linenos) +
inner_line)
else:
yield 1, linenos + inner_line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
# subtract 1 since we have to increment i *before* yielding
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
(style and (' style="%s"' % style)) + '>')
yield from inner
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append(self._pre_style)
style = '; '.join(style)
if self.filename:
yield 0, ('<span class="filename">' + self.filename + '</span>')
# the empty span here is to keep leading empty lines from being
# ignored by HTML parsers
yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
yield from inner
yield 0, '</pre>'
def _wrap_code(self, inner):
yield 0, '<code>'
yield from inner
yield 0, '</code>'
@functools.lru_cache(maxsize=100)
def _translate_parts(self, value):
"""HTML-escape a value and split it by newlines."""
return value.translate(_escape_html_table).split('\n')
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
try:
cspan = self.span_element_openers[ttype]
except KeyError:
if nocls:
css_style = self._get_css_inline_styles(ttype)
cspan = css_style and '<span style="%s">' % self.class2style[css_style][0] or ''
else:
css_class = self._get_css_classes(ttype)
cspan = css_class and '<span class="%s">' % css_class or ''
self.span_element_openers[ttype] = cspan
parts = self._translate_parts(value)
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token, 0):
return entry['file'], entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
if self.wrapcode:
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
else:
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
# As a special case, we wrap line numbers before line highlighting
# so the line numbers get wrapped in the highlighting tag.
if not self.nowrap and self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
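
# A minimal usage sketch, not part of the upstream module: it assumes
# pygments itself is installed and importable, and shows inline line
# numbers, one highlighted line, and the CSS from get_style_defs().
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    sample = "def foo(bar):\n    return bar\n"
    formatter = HtmlFormatter(linenos='inline', hl_lines=[2], cssclass='src')
    print(highlight(sample, PythonLexer(), formatter))
    # CSS rules scoped to the wrapping <div class="src">:
    print(formatter.get_style_defs('.src'))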


@@ -0,0 +1,641 @@
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
get_choice_opt
import subprocess
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager:
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
elif sys.platform.startswith('darwin'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_MAC
self._create_mac()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
stdout=subprocess.PIPE, stderr=None)
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
for line in lines:
if line.startswith(b'Fontconfig warning:'):
continue
path = line.decode().strip().strip(':')
if path:
return path
return None
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _get_mac_font_path(self, font_map, name, style):
return font_map.get((name + ' ' + style).strip().lower())
def _create_mac(self):
font_map = {}
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
for f in os.listdir(font_dir)
if f.lower().endswith(('ttf', 'ttc')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_mac_font_path(font_map, self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except OSError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
lookuperror = None
keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
for keyname in keynames:
try:
key = _winreg.OpenKey(*keyname)
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
return
except FontNotFound as err:
lookuperror = err
finally:
_winreg.CloseKey(key)
except OSError:
pass
else:
# If we get here, we checked all registry keys and had no luck
# We can be in one of two situations now:
# * All key lookups failed. In this case lookuperror is None and we
# will raise a generic error
# * At least one lookup failed with a FontNotFound error. In this
# case, we will raise that as a more specific error
if lookuperror:
raise lookuperror
raise FontNotFound('Can\'t open Windows font registry key')
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_text_size(self, text):
"""
Get the text size (width, height).
"""
return self.fonts['NORMAL'].getsize(text)
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 0.10
Additional options accepted:
`image_format`
An image format to output that is recognised by PIL; these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font, from which others (such as
bold and italic fonts) will be generated. This really should be a
monospace font to look sane.
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
"DejaVu Sans Mono" on \\*nix
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels, to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 1.2
Default: empty list
`hl_color`
Specify the color for highlighting lines.
.. versionadded:: 1.2
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
self.encoding = 'latin1' # let pygments.format() do the right thing
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, linelength):
"""
Get the X coordinate of a character position.
"""
return linelength + self.image_pad + self.line_number_width
def _get_text_pos(self, linelength, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(linelength), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_text_bg_color(self, style):
"""
Get the correct background color for the token from the style.
"""
if style['bgcolor'] is not None:
bg_color = '#' + style['bgcolor']
else:
bg_color = None
return bg_color
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxlinelength, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxlinelength) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
text_fg=self.line_number_fg,
text_bg=None,
)
def _draw_text(self, pos, text, font, text_fg, text_bg):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, text_fg, text_bg))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
maxlinelength = linelength = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(linelength, lineno),
temp,
font = self._get_style_font(style),
text_fg = self._get_text_color(style),
text_bg = self._get_text_bg_color(style),
)
temp_width, temp_height = self.fonts.get_text_size(temp)
linelength += temp_width
maxlinelength = max(maxlinelength, linelength)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
linelength = 0
charno = 0
lineno += 1
self.maxlinelength = maxlinelength
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
if self.line_number_separator:
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxlinelength, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, text_fg, text_bg in self.drawables:
if text_bg:
text_size = draw.textsize(text=value, font=font)
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
draw.text(pos, value, font=font, fill=text_fg)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
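
# A minimal usage sketch, not part of the upstream module: it assumes
# pygments and Pillow are installed and that a default monospace font
# (e.g. DejaVu Sans Mono) can be found on the system.
if __name__ == '__main__':
    from io import BytesIO
    from pygments import highlight
    from pygments.lexers import PythonLexer

    sample = "def foo(bar):\n    return bar\n"
    formatter = ImageFormatter(line_numbers=True, hl_lines=[2])
    buf = BytesIO()
    highlight(sample, PythonLexer(), formatter, buf)  # writes PNG bytes
    with open('example.png', 'wb') as fp:
        fp.write(buf.getvalue())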


@@ -0,0 +1,181 @@
"""
pygments.formatters.irc
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for IRC output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.util import get_choice_opt
__all__ = ['IRCFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
IRC_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightcyan'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
IRC_COLOR_MAP = {
'white': 0,
'black': 1,
'blue': 2,
'brightgreen': 3,
'brightred': 4,
'yellow': 5,
'magenta': 6,
'orange': 7,
'green': 7, #compat w/ ansi
'brightyellow': 8,
'lightgreen': 9,
'brightcyan': 9, # compat w/ ansi
'cyan': 10,
'lightblue': 11,
'red': 11, # compat w/ ansi
'brightblue': 12,
'brightmagenta': 13,
'brightblack': 14,
'gray': 15,
}
def ircformat(color, text):
if len(color) < 1:
return text
add = sub = ''
if '_' in color: # italic
add += '\x1D'
sub = '\x1D' + sub
color = color.strip('_')
if '*' in color: # bold
add += '\x02'
sub = '\x02' + sub
color = color.strip('*')
# underline (\x1F) not supported
# backgrounds (\x03FF,BB) not supported
if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
sub = '\x03' + sub
return add + text + sub
class IRCFormatter(Formatter):
r"""
Format tokens with IRC color sequences
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers in the output as well
(default: ``False`` = no line numbers).
"""
name = 'IRC'
aliases = ['irc', 'IRC']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("\n%04d: " % self._lineno)
def _format_unencoded_with_lineno(self, tokensource, outfile):
self._write_lineno(outfile)
for ttype, value in tokensource:
if value.endswith("\n"):
self._write_lineno(outfile)
value = value[:-1]
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
self._write_lineno(outfile)
if line:
outfile.write(ircformat(color, line))
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
outfile.write("\n")
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._format_unencoded_with_lineno(tokensource, outfile)
return
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ircformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
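
# A minimal usage sketch, not part of the upstream module: it assumes
# pygments itself is installed and importable. ircformat() wraps text
# in mIRC control codes, e.g.:
#     ircformat('*red*', 'hi') == '\x02\x0311hi\x03\x02'   (bold + color 11)
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    sample = "def foo(bar):\n    return bar\n"
    print(highlight(sample, PythonLexer(), IRCFormatter(bg='dark', linenos=True)))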


@@ -0,0 +1,511 @@
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from io import StringIO
from pygments.formatter import Formatter
from pygments.lexer import Lexer, do_insertions
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
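# For example, with the default 'PY' command prefix each special character
# becomes a \PYZxx{} macro (defined in STYLE_TEMPLATE below):
#     escape_tex('a_b & 50%', 'PY') == r'a\PYZus{}b \PYZam{} 50\PYZpc{}'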
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print(STYLE_TEMPLATE % {'cp': 'PY'})
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
`envname`
Allows you to pick an alternative environment name replacing Verbatim.
The alternate environment still has to support Verbatim's option syntax.
(default: ``'Verbatim'``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self.envname = options.get('envname', 'Verbatim')
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\texttt'  # typewriter font, not \textsf, for mono styles
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.items():
styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(',numbers=left' +
(start and ',firstnumber=%d' % start or '') +
(step and ',stepnumber=%d' % step or ''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
'\\catcode`\\_=8\\relax}')
if self.verboptions:
outfile.write(',' + self.verboptions)
outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in range(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, cp)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, cp)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while text:
a, sep1, text = text.partition(self.left)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
value += escape_tex(a, cp) + b
else:
value += escape_tex(a + sep1 + b, cp)
else:
value += escape_tex(a, cp)
else:
value = escape_tex(value, cp)
elif ttype not in Token.Escape:
value = escape_tex(value, cp)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write('\\end{' + self.envname + '}\n')
if self.full:
encoding = self.encoding or 'utf8'
# map known existing encodings from LaTeX distribution
encoding = {
'utf_8': 'utf8',
'latin_1': 'latin1',
'iso_8859_1': 'latin1',
}.get(encoding.replace('-', '_'), encoding)
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = encoding,
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
# find and remove all the escape tokens (replace with an empty string)
# this is very similar to DelegatingLexer.get_tokens_unprocessed.
buffered = ''
insertions = []
insertion_buf = []
for i, t, v in self._find_safe_escape_tokens(text):
if t is None:
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
insertion_buf = []
buffered += v
else:
insertion_buf.append((i, t, v))
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
return do_insertions(insertions,
self.lang.get_tokens_unprocessed(buffered))
def _find_safe_escape_tokens(self, text):
""" find escape tokens that are not in strings or comments """
for i, t, v in self._filter_to(
self.lang.get_tokens_unprocessed(text),
lambda t: t in Token.Comment or t in Token.String
):
if t is None:
for i2, t2, v2 in self._find_escape_tokens(v):
yield i + i2, t2, v2
else:
yield i, None, v
def _filter_to(self, it, pred):
""" Keep only the tokens that match `pred`, merge the others together """
buf = ''
idx = 0
for i, t, v in it:
if pred(t):
if buf:
yield idx, None, buf
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
yield idx, None, buf
def _find_escape_tokens(self, text):
""" Find escape tokens within text, give token=None otherwise """
index = 0
while text:
a, sep1, text = text.partition(self.left)
if a:
yield index, None, a
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
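
The escapeinside option above needs no LatexEmbeddedLexer when the escaped span sits inside a comment token, since the Comment branch of format_unencoded handles it directly. A minimal sketch, assuming the vendored pygments package is importable:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import LatexFormatter

# Text between the pipe delimiters is passed through to LaTeX unescaped.
code = 'print("hello")  # |\\emph{raw LaTeX here}|\n'
formatter = LatexFormatter(escapeinside='||', full=True)
print(highlight(code, PythonLexer(), formatter))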

View file

@ -0,0 +1,161 @@
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_choice_opt
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
:doc:`lexer list <lexers>`.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
# We ignore self.encoding if it is set, since it gets set for lexer
# and formatter if given with -Oencoding on the command line.
# The RawTokenFormatter outputs only ASCII. Override here.
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
write = outfile.write
flush = outfile.close
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
write = outfile.write
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = b"%r\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write(b"%r\t%r\n" % (ttype, value))
flush()
TESTCASE_BEFORE = '''\
def testNeedsName(lexer):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = '''\
]
assert list(lexer.get_tokens(fragment)) == tokens
'''
class TestcaseFormatter(Formatter):
"""
Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0
"""
name = 'Testcase'
aliases = ['testcase']
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding is not None and self.encoding != 'utf-8':
raise ValueError("Only None and utf-8 are allowed encodings.")
def format(self, tokensource, outfile):
indentation = ' ' * 12
rawbuf = []
outbuf = []
for ttype, value in tokensource:
rawbuf.append(value)
outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
before = TESTCASE_BEFORE % (''.join(rawbuf),)
during = ''.join(outbuf)
after = TESTCASE_AFTER
if self.encoding is None:
outfile.write(before + during + after)
else:
outfile.write(before.encode('utf-8'))
outfile.write(during.encode('utf-8'))
outfile.write(after.encode('utf-8'))
outfile.flush()
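
As the write(b'') probe above shows, RawTokenFormatter refuses text-mode files. A minimal sketch that captures the token dump in an in-memory binary buffer, assuming the vendored pygments package is importable:

import io
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import RawTokenFormatter

buf = io.BytesIO()  # the formatter requires a binary sink
highlight('x = 1\n', PythonLexer(), RawTokenFormatter(), outfile=buf)
# One "tokentype<TAB>repr(tokenstring)" line per token:
print(buf.getvalue().decode('ascii'))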

View file

@ -0,0 +1,146 @@
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_int_opt, surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft(R) Word(R) documents.
Please note that ``encoding`` and ``outencoding`` options are ignored.
The RTF format is ASCII natively, but handles unicode characters correctly
thanks to escape sequences.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
The used font family, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
`fontsize`
Size of the font used. Size is specified in half points. The
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
def __init__(self, **options):
r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
# skip empty strings; this should give a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
buf = []
for c in text:
cn = ord(c)
if cn < (2**7):
# ASCII character
buf.append(str(c))
elif (2**7) <= cn < (2**16):
# single unicode escape sequence
buf.append('{\\u%d}' % cn)
elif (2**16) <= cn:
# RTF limits unicode to 16 bits.
# Force surrogate pairs
buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
return ''.join(buf).replace('\n', '\\par\n')
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
'{\\colortbl;' % (self.fontface and
' ' + self._escape(self.fontface) or
''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write('\\red%d\\green%d\\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write('}\\f0 ')
if self.fontsize:
outfile.write('\\fs%d' % self.fontsize)
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append('\\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append('\\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append('\\b')
if style['italic']:
buf.append('\\i')
if style['underline']:
buf.append('\\ul')
if style['border']:
buf.append('\\chbrdr\\chcfpat%d' %
color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
outfile.write('}')
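
The half-point fontsize convention above is easy to trip over: 28 half-points is a 14pt font. A minimal sketch, assuming the vendored pygments package is importable ('snippet.rtf' is an arbitrary output path):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import RtfFormatter

rtf = highlight('x = 1\n', PythonLexer(),
                RtfFormatter(fontface='Courier New', fontsize=28))
with open('snippet.rtf', 'w') as fp:
    fp.write(rtf)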

View file

@ -0,0 +1,188 @@
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Comment
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.replace('&', '&amp;'). \
replace('<', '&lt;'). \
replace('>', '&gt;'). \
replace('"', '&quot;'). \
replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add a XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`linenos`
If ``True``, add line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenowidth`
Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
for up to 4-digit line numbers. Increase width for longer code blocks).
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` otherwise. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` otherwise.
`spacehack`
Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'): fs = fs[:-2].strip()
try:
int_fs = int(fs)
except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = get_int_opt(options, 'linenostart', 1)
self.linenostep = get_int_opt(options, 'linenostep', 1)
self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
counter = self.linenostart
counter_step = self.linenostep
counter_style = self._get_style(Comment)
line_x = x
if self.linenos:
if counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
(x + self.linenowidth, y, counter_style, counter))
line_x += self.linenowidth + self.ystep
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n')
if self.linenos and counter % counter_step == 0:
outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
(x + self.linenowidth, y, counter_style, counter))
counter += 1
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
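
A minimal sketch of the SVG formatter above, exercising the line-number options; it assumes the vendored pygments package is importable, and 'snippet.svg' is an arbitrary output path:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import SvgFormatter

svg = highlight('for i in range(3):\n    print(i)\n', PythonLexer(),
                SvgFormatter(linenos=True, linenostep=2, fontsize='16px'))
with open('snippet.svg', 'w') as fp:
    fp.write(svg)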

View file

@ -0,0 +1,129 @@
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightmagenta'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Prompt: ('**', '**'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def format(self, tokensource, outfile):
return Formatter.format(self, tokensource, outfile)
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
def _get_color(self, ttype):
# self.colorscheme is a dict containing usually generic types, so we
# have to walk the tree of dots. The base Token type must be a key,
even if it's the empty string, as in the default above.
colors = self.colorscheme.get(ttype)
while colors is None:
ttype = ttype.parent
colors = self.colorscheme.get(ttype)
return colors[self.darkbg]
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
color = self._get_color(ttype)
for line in value.splitlines(True):
if color:
outfile.write(ansiformat(color, line.rstrip('\n')))
else:
outfile.write(line.rstrip('\n'))
if line.endswith('\n'):
if self.linenos:
self._write_lineno(outfile)
else:
outfile.write('\n')
if self.linenos:
outfile.write("\n")

View file

@ -0,0 +1,340 @@
"""
pygments.formatters.terminal256
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for 256-color terminal output with ANSI sequences.
RGB-to-XTERM color conversion routines adapted from xterm256-conv
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
by Wolfgang Frisch.
Formatter version 1.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# TODO:
# - Options to map style's bold/underline/italic/border attributes
# to some ANSI attributes (something like 'italic=underline')
# - An option to output "style RGB to xterm RGB/index" conversion table
# - An option to indicate that we are running in "reverse background"
# xterm. This means that default colors are white-on-black, not
# black-on-white, so colors like "white background" need to be converted
# to "white background, black foreground", etc...
import sys
from pygments.formatter import Formatter
from pygments.console import codes
from pygments.style import ansicolors
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
class EscapeSequence:
def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
self.italic = italic
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
if self.fg in ansicolors:
esc = codes[self.fg.replace('ansi','')]
if ';01m' in esc:
self.bold = True
# extract fg color code.
attrs.append(esc[2:4])
else:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
if self.bg in ansicolors:
esc = codes[self.bg.replace('ansi','')]
# extract fg color code, add 10 for bg.
attrs.append(str(int(esc[2:4])+10))
else:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def true_color_string(self):
attrs = []
if self.fg:
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
if self.bg:
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline or self.italic:
attrs.append("00")
return self.escape(attrs)
class Terminal256Formatter(Formatter):
"""
Format tokens with ANSI color sequences, for output in a 256-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
The formatter takes colors from a style defined by the `style` option
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
.. versionadded:: 0.9
.. versionchanged:: 2.2
If the used style defines foreground colors in the form ``#ansi*``, then
`Terminal256Formatter` will map these to non-extended foreground colors.
See :ref:`AnsiTerminalStyle` for more information.
.. versionchanged:: 2.4
The ANSI color names have been updated with names that are easier to
understand and align with the color names of other projects and terminals.
See :ref:`this table <new-ansi-color-names>` for more information.
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = 'Terminal256'
aliases = ['terminal256', 'console256', '256']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.xterm_colors = []
self.best_match = {}
self.style_string = {}
self.usebold = 'nobold' not in options
self.useunderline = 'nounderline' not in options
self.useitalic = 'noitalic' not in options
self._build_color_table() # build an RGB-to-256 color conversion table
self._setup_styles() # convert selected style's colors to term. colors
self.linenos = options.get('linenos', False)
self._lineno = 0
def _build_color_table(self):
# colors 0..15: 16 basic colors
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
self.xterm_colors.append((r, g, b))
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
self.xterm_colors.append((v, v, v))
def _closest_color(self, r, g, b):
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i in range(0, 254):
values = self.xterm_colors[i]
rd = r - values[0]
gd = g - values[1]
bd = b - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
return match
def _color_index(self, color):
index = self.best_match.get(color, None)
if color in ansicolors:
# strip the `ansi/#ansi` part and look up code
index = color
self.best_match[color] = index
if index is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
index = self._closest_color(r, g, b)
self.best_match[color] = index
return index
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
# get foreground from ansicolor if set
if ndef['ansicolor']:
escape.fg = self._color_index(ndef['ansicolor'])
elif ndef['color']:
escape.fg = self._color_index(ndef['color'])
if ndef['bgansicolor']:
escape.bg = self._color_index(ndef['bgansicolor'])
elif ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
if self.useitalic and ndef['italic']:
escape.italic = True
self.style_string[str(ttype)] = (escape.color_string(),
escape.reset_string())
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
def format(self, tokensource, outfile):
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
not_found = True
while ttype and not_found:
try:
# outfile.write( "<" + str(ttype) + ">" )
on, off = self.style_string[str(ttype)]
# Like TerminalFormatter, add "reset colors" escape sequence
# on newline.
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(on + line + off)
if self.linenos:
self._write_lineno(outfile)
else:
outfile.write('\n')
if spl[-1]:
outfile.write(on + spl[-1] + off)
not_found = False
# outfile.write( '#' + str(ttype) + '#' )
except KeyError:
# ottype = ttype
ttype = ttype[:-1]
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
if not_found:
outfile.write(value)
if self.linenos:
outfile.write("\n")
class TerminalTrueColorFormatter(Terminal256Formatter):
r"""
Format tokens with ANSI color sequences, for output in a true-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
.. versionadded:: 2.1
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
"""
name = 'TerminalTrueColor'
aliases = ['terminal16m', 'console16m', '16m']
filenames = []
def _build_color_table(self):
pass
def _color_tuple(self, color):
try:
rgb = int(str(color), 16)
except ValueError:
return None
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return (r, g, b)
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_tuple(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_tuple(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
if self.useitalic and ndef['italic']:
escape.italic = True
self.style_string[str(ttype)] = (escape.true_color_string(),
escape.reset_string())
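
A minimal sketch contrasting the two formatters above: Terminal256Formatter snaps style colors to the nearest xterm palette index, while TerminalTrueColorFormatter emits the RGB values directly. It assumes the vendored pygments package is importable; 'monokai' is one of the styles shipped with Pygments:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import (Terminal256Formatter,
                                 TerminalTrueColorFormatter)

snippet = 'def f():\n    return 42\n'
print(highlight(snippet, PythonLexer(), Terminal256Formatter(style='monokai')))
print(highlight(snippet, PythonLexer(),
                TerminalTrueColorFormatter(style='monokai')))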

874
libs/pygments/lexer.py Normal file
View file

@ -0,0 +1,874 @@
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import time
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
'default', 'words']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
(b'\xff\xfe\0\0', 'utf-32'),
(b'\0\0\xfe\xff', 'utf-32be'),
(b'\xff\xfe', 'utf-16'),
(b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(mcs, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(mcs, name, bases, d)
class Lexer(metaclass=LexerMeta):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
Latin1 detection). Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return values was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted, and apply registered filters.
"""
if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
import chardet
except ImportError as e:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/') from e
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
else:
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for _, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (index, tokentype, value) pairs where "index"
is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexers as arguments: a root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str): # pylint: disable=invalid-name
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit:
"""
Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit() # pylint: disable=invalid-name
class combined(tuple): # pylint: disable=invalid-name
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch:
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
class _This:
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class default:
"""
Indicates a state or state action (e.g. #pop) to apply.
For example default('#pop') is equivalent to ('', Token, '#pop').
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
class words(Future):
"""
Indicates a list of literal words that is transformed into an optimized
regex that matches any of the words.
.. versionadded:: 2.0
"""
def __init__(self, words, prefix='', suffix=''):
self.words = words
self.prefix = prefix
self.suffix = suffix
def get(self):
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err)) from err
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(statestack) > 1:
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop, but keep at least one state on the stack
# (random code leading to unexpected pops should
# not allow exceptions)
if abs(new_state) >= len(statestack):
del statestack[1:]
else:
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
# We are here only if all state tokens have been considered
# and there was not a match on any of them.
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext:
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
yield from action(self, m, ctx)
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(ctx.stack) > 1:
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# see RegexLexer for why this check is made
if abs(new_state) >= len(ctx.stack):
del ctx.stack[1:]
else:
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
yield from tokens
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
if tmpval:
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
if oldi < len(v):
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
class ProfilingRegexLexerMeta(RegexLexerMeta):
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
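
A minimal sketch of the RegexLexer machinery above: a state table, a bygroups callback, and get_tokens(). IniishLexer is a made-up toy class for an INI-like syntax, not something shipped with Pygments:

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Operator, String, Text

class IniishLexer(RegexLexer):
    name = 'Iniish'
    aliases = ['iniish']
    tokens = {
        'root': [
            (r';.*?$', Comment.Single),   # ; comment to end of line
            (r'(\w+)(\s*=\s*)(.*?)$',     # key = value
             bygroups(Name.Attribute, Operator, String)),
            (r'\s+', Text),               # whitespace, including newlines
        ],
    }

for ttype, value in IniishLexer().get_tokens('answer = 42\n; done\n'):
    print(ttype, repr(value))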

View file

@ -0,0 +1,341 @@
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, guess_decode
COMPAT = {
'Python3Lexer': 'PythonLexer',
'Python3TracebackLexer': 'PythonTracebackLexer',
}
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
for item in LEXERS.values():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.values():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def find_lexer_class_by_name(_alias):
"""Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
"""Load a lexer from a file.
This method expects a file located relative to the current working
directory, which contains a Lexer class. By default, it expects the
Lexer to be named CustomLexer; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(lexername, filename))
lexer_class = custom_namespace[lexername]
# And finally instantiate it with the options
return lexer_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom lexer: %s' % err)
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
        bonus = 0.5 if '*' not in filename else 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
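# Illustrative usage (editor's sketch): when several filename patterns match
# (e.g. '*.m' is claimed by Matlab, Objective-C, and Mason, among others),
# passing the file's content lets analyse_text() break the tie:
#
#     >>> from pygments.lexers import get_lexer_for_filename
#     >>> get_lexer_for_filename('setup.py').name
#     'Python'
#     >>> get_lexer_for_filename('model.m', code=source)  # source: file content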
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in LEXERS.values():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
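# Illustrative usage (editor's sketch): mimetype lookup is an exact match
# against each lexer's ``mimetypes`` tuple:
#
#     >>> from pygments.lexers import get_lexer_for_mimetype
#     >>> get_lexer_for_mimetype('text/x-python').name
#     'Python'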
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
yield from find_plugin_lexers()
def guess_lexer_for_filename(_fn, _text, **options):
"""
    Look up all lexers that handle the given filename as a primary
    (``filenames``) or secondary (``alias_filenames``) pattern. Then run a
    text analysis on those lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
if not isinstance(_text, str):
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            _text = _text.decode(inencoding)
else:
_text, _ = guess_decode(_text)
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
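# Illustrative usage (editor's sketch): guess_lexer() scores every lexer's
# analyse_text() on the text; a perfect 1.0 short-circuits, otherwise the
# highest scorer wins. A shebang is a typically decisive signal:
#
#     >>> from pygments.lexers import guess_lexer
#     >>> guess_lexer('#!/usr/bin/env python\nprint("hi")\n').name
#     'Python'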
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
if name in COMPAT:
return getattr(self, COMPAT[name])
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
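# Editor's note (illustrative): the module-replacement trick above makes lexer
# classes lazily importable; the defining submodule is only imported on first
# attribute access:
#
#     >>> from pygments.lexers import PythonLexer  # triggers _automodule.__getattr__
#     >>> PythonLexer.__module__
#     'pygments.lexers.python'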

File diff suppressed because it is too large


@ -0,0 +1,231 @@
"""
pygments.lexers._cl_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = { # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
    'substitute', 'substitute-if', 'substitute-if-not', 'subtypep', 'svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
}
SPECIAL_FORMS = {
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
}
MACROS = {
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
}
LAMBDA_LIST_KEYWORDS = {
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
}
DECLARATIONS = {
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
}
BUILTIN_TYPES = {
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
}
BUILTIN_CLASSES = {
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
}
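# Editor's note (illustrative): these sets exist for fast membership tests
# when a lexer classifies a scanned symbol; the actual token choices live in
# the Common Lisp lexer in pygments.lexers.lisp. Roughly:
#
#     if value in BUILTIN_FUNCTIONS:
#         token = Name.Builtin
#     elif value in SPECIAL_FORMS or value in MACROS:
#         token = Keyword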

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,292 @@
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This file contains the names and modules of lua functions.
    It is able to re-generate itself, but for adding new functions you
    probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getmetatable',
'ipairs',
'load',
'loadfile',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawlen',
'rawset',
'select',
'setmetatable',
'tonumber',
'tostring',
'type',
'xpcall'),
'bit32': ('bit32.arshift',
'bit32.band',
'bit32.bnot',
'bit32.bor',
'bit32.btest',
'bit32.bxor',
'bit32.extract',
'bit32.lrotate',
'bit32.lshift',
'bit32.replace',
'bit32.rrotate',
'bit32.rshift'),
'coroutine': ('coroutine.create',
'coroutine.isyieldable',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.getuservalue',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.setuservalue',
'debug.traceback',
'debug.upvalueid',
'debug.upvaluejoin'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.stderr',
'io.stdin',
'io.stdout',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan',
'math.atan2',
'math.ceil',
'math.cos',
'math.cosh',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log',
'math.max',
'math.maxinteger',
'math.min',
'math.mininteger',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sin',
'math.sinh',
'math.sqrt',
'math.tan',
'math.tanh',
'math.tointeger',
'math.type',
'math.ult'),
'modules': ('package.config',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.searchers',
'package.searchpath',
'require'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.pack',
'string.packsize',
'string.rep',
'string.reverse',
'string.sub',
'string.unpack',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.move',
'table.pack',
'table.remove',
'table.sort',
'table.unpack'),
'utf8': ('utf8.char',
'utf8.charpattern',
'utf8.codepoint',
'utf8.codes',
'utf8.len',
'utf8.offset')}
if __name__ == '__main__': # pragma: no cover
import re
import sys
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
    # You can't generally find out what module a function belongs to if you
    # have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module.
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
        def is_in_modules_module(name):
            return name in ('require', 'module') or name.startswith('package')
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>')
        for line in f:
            # urlopen yields bytes on Python 3; decode before matching
            m = r.match(line.decode('iso-8859-1'))
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>')
functions = []
        for line in f:
            # urlopen yields bytes on Python 3; decode before matching
            m = r.match(line.decode('iso-8859-1'))
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
functions = set()
for v in ('5.2', version):
print('> Downloading function index for Lua %s' % v)
f = get_lua_functions(v)
print('> %d functions found, %d new:' %
(len(f), len(set(f) - functions)))
functions |= set(f)
functions = sorted(functions)
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
modules = {k: tuple(v) for k, v in modules.items()}
regenerate(__file__, modules)
run()
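# Editor's note (illustrative): running this file directly, e.g.
#
#     $ python _lua_builtins.py
#
# fetches the function index for Lua 5.2 plus the newest manual from lua.org,
# groups the names by module via module_callbacks(), and rewrites the MODULES
# dict in place via regenerate().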


@ -0,0 +1,554 @@
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something in a builtin lexer definition, run this script from
    the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'AMDGPULexer': ('pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BareLexer': ('pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CddlLexer': ('pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FStarLexer': ('pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'FutharkLexer': ('pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GraphvizLexer': ('pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm', '*.mjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', (), (), ()),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md', 'markdown'), ('*.md', '*.markdown'), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('ms', 'miniscript'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PointlessLexer': ('pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PsyshConsoleLexer': ('pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reason', 'reasonml'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
}
if __name__ == '__main__':  # pragma: no cover
    import sys
    import os

    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                module_name = 'pygments.lexers%s.%s' % (
                    root[1:].replace('/', '.'), filename[:-3])
                print(module_name)
                module = __import__(module_name, None, None, [''])
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # sort them to make the diff minimal
    found_lexers.sort()

    # extract useful sourcecode from this file
    with open(__file__) as fp:
        content = fp.read()
    # Replace CRLF with LF for Windows.
    #
    # Note that, originally, contributors should keep the LF line endings
    # of the master repository, for example by using some kind of
    # automatic EOL management, like the `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`_.
    content = content.replace("\r\n", "\n")
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]

    # write new file
    with open(__file__, 'w') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
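As an aside for readers of this diff: the LEXERS table above is the registry that Pygments' public helpers resolve aliases against. A minimal sketch of exercising it through that API (the 'postgresql' alias is taken from the mapping above; the one-line SQL sample is illustrative only):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import get_lexer_by_name

    # 'postgresql' is one of the aliases registered for PostgresLexer above;
    # get_lexer_by_name() resolves it via the LEXERS mapping in this file.
    lexer = get_lexer_by_name('postgresql')
    print(highlight('SELECT 1;', lexer, TerminalFormatter()))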

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,677 @@
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTACH',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALL',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COLUMNS',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONFLICT',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CUBE',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DEPENDS',
'DESC',
'DETACH',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXPRESSION',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GENERATED',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'GROUPING',
'GROUPS',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IMPORT',
'IN',
'INCLUDE',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'LOCKED',
'LOGGED',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'METHOD',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEW',
'NEXT',
'NFC',
'NFD',
'NFKC',
'NFKD',
'NO',
'NONE',
'NORMALIZE',
'NORMALIZED',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'OLD',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OTHERS',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OVERRIDING',
'OWNED',
'OWNER',
'PARALLEL',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROCEDURES',
'PROGRAM',
'PUBLICATION',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFERENCING',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROLLUP',
'ROUTINE',
'ROUTINES',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCHEMAS',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SETS',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SKIP',
'SMALLINT',
'SNAPSHOT',
'SOME',
'SQL',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STORED',
'STRICT',
'STRIP',
'SUBSCRIPTION',
'SUBSTRING',
'SUPPORT',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESAMPLE',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIES',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TRANSFORM',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UESCAPE',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLNAMESPACES',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'XMLTABLE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'macaddr8',
'money',
'numeric',
'path',
'pg_lsn',
'pg_snapshot',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
'any',
'anyarray',
'anycompatible',
'anycompatiblearray',
'anycompatiblenonarray',
'anycompatiblerange',
'anyelement',
'anyenum',
'anynonarray',
'anyrange',
'cstring',
'event_trigger',
'fdw_handler',
'index_am_handler',
'internal',
'language_handler',
'pg_ddl_command',
'record',
'table_am_handler',
'trigger',
'tsm_handler',
'unknown',
'void',
)
# Remove 'trigger' from types
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
PLPGSQL_KEYWORDS = (
'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
if __name__ == '__main__':  # pragma: no cover
    import re
    try:
        from urllib import urlopen
    except ImportError:
        from urllib.request import urlopen

    from pygments.util import format_lines

    # One man's constant is another man's variable.
    SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
    KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h'
    DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'

    def update_myself():
        content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore')
        data_file = list(content.splitlines())
        datatypes = parse_datatypes(data_file)
        pseudos = parse_pseudos(data_file)

        content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore')
        keywords = parse_keywords(content)

        update_consts(__file__, 'DATATYPES', datatypes)
        update_consts(__file__, 'PSEUDO_TYPES', pseudos)
        update_consts(__file__, 'KEYWORDS', keywords)

    def parse_keywords(f):
        kw = []
        for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f):
            kw.append(m.group(1).upper())

        if not kw:
            raise ValueError('no keyword found')

        kw.sort()
        return kw

    def parse_datatypes(f):
        dt = set()
        for line in f:
            if '<sect1' in line:
                break
            if '<entry><type>' not in line:
                continue

            # Parse a string such as
            # time [ (<replaceable>p</replaceable>) ] [ without time zone ]
            # into types "time" and "without time zone"

            # remove all the tags
            line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
            line = re.sub("<[^>]+>", "", line)

            # Drop the parts containing braces
            for tmp in [t for tmp in line.split('[')
                        for t in tmp.split(']') if "(" not in t]:
                for t in tmp.split(','):
                    t = t.strip()
                    if not t: continue
                    dt.add(" ".join(t.split()))

        dt = list(dt)
        dt.sort()
        return dt

    def parse_pseudos(f):
        dt = []
        re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
        re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>')
        re_end = re.compile(r'\s*</table>')

        f = iter(f)
        for line in f:
            if re_start.match(line) is not None:
                break
        else:
            raise ValueError('pseudo datatypes table not found')

        for line in f:
            m = re_entry.match(line)
            if m is not None:
                dt.append(m.group(1))

            if re_end.match(line) is not None:
                break
        else:
            raise ValueError('end of pseudo datatypes table not found')

        if not dt:
            raise ValueError('pseudo datatypes not found')

        dt.sort()
        return dt

    def update_consts(filename, constname, content):
        with open(filename) as f:
            data = f.read()

        # Line to start/end inserting
        re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
        m = re_match.search(data)
        if not m:
            raise ValueError('Could not find existing definition for %s' %
                             (constname,))

        new_block = format_lines(constname, content)
        data = data[:m.start()] + new_block + data[m.end():]

        with open(filename, 'w', newline='\n') as f:
            f.write(data)

    update_myself()
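As a quick sanity check of the scraping logic above, the same PG_KEYWORD regex can be run against an inline snippet instead of the live kwlist.h download (the two sample lines below are illustrative, not fetched from PostgreSQL):

    import re

    # Two lines in the shape of src/include/parser/kwlist.h entries.
    sample = '''
    PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD, BARE_LABEL)
    PG_KEYWORD("select", SELECT, RESERVED_KEYWORD, BARE_LABEL)
    '''
    kw = sorted(m.group(1).upper()
                for m in re.finditer(r'PG_KEYWORD\("(.+?)"', sample))
    print(kw)  # ['ABORT', 'SELECT']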

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,557 @@
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
``pygments.lexers.math.StanLexer``. This is for Stan language version 2.17.0.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = (
'break',
'continue',
'else',
'for',
'if',
'in',
'print',
'reject',
'return',
'while',
)
TYPES = (
'cholesky_factor_corr',
'cholesky_factor_cov',
'corr_matrix',
'cov_matrix',
'int',
'matrix',
'ordered',
'positive_ordered',
'real',
'row_vector',
'simplex',
'unit_vector',
'vector',
'void',
)
FUNCTIONS = (
'abs',
'acos',
'acosh',
'algebra_solver',
'append_array',
'append_col',
'append_row',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_cdf',
'bernoulli_lccdf',
'bernoulli_lcdf',
'bernoulli_logit_lpmf',
'bernoulli_logit_rng',
'bernoulli_lpmf',
'bernoulli_rng',
'bessel_first_kind',
'bessel_second_kind',
'beta_binomial_cdf',
'beta_binomial_lccdf',
'beta_binomial_lcdf',
'beta_binomial_lpmf',
'beta_binomial_rng',
'beta_cdf',
'beta_lccdf',
'beta_lcdf',
'beta_lpdf',
'beta_rng',
'binary_log_loss',
'binomial_cdf',
'binomial_coefficient_log',
'binomial_lccdf',
'binomial_lcdf',
'binomial_logit_lpmf',
'binomial_lpmf',
'binomial_rng',
'block',
'categorical_logit_lpmf',
'categorical_logit_rng',
'categorical_lpmf',
'categorical_rng',
'cauchy_cdf',
'cauchy_lccdf',
'cauchy_lcdf',
'cauchy_lpdf',
'cauchy_rng',
'cbrt',
'ceil',
'chi_square_cdf',
'chi_square_lccdf',
'chi_square_lcdf',
'chi_square_lpdf',
'chi_square_rng',
'cholesky_decompose',
'choose',
'col',
'cols',
'columns_dot_product',
'columns_dot_self',
'cos',
'cosh',
'cov_exp_quad',
'crossprod',
'csr_extract_u',
'csr_extract_v',
'csr_extract_w',
'csr_matrix_times_vector',
'csr_to_dense_matrix',
'cumulative_sum',
'determinant',
'diag_matrix',
'diag_post_multiply',
'diag_pre_multiply',
'diagonal',
'digamma',
'dims',
'dirichlet_lpdf',
'dirichlet_rng',
'distance',
'dot_product',
'dot_self',
'double_exponential_cdf',
'double_exponential_lccdf',
'double_exponential_lcdf',
'double_exponential_lpdf',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
'erf',
'erfc',
'exp',
'exp2',
'exp_mod_normal_cdf',
'exp_mod_normal_lccdf',
'exp_mod_normal_lcdf',
'exp_mod_normal_lpdf',
'exp_mod_normal_rng',
'expm1',
'exponential_cdf',
'exponential_lccdf',
'exponential_lcdf',
'exponential_lpdf',
'exponential_rng',
'fabs',
'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'frechet_cdf',
'frechet_lccdf',
'frechet_lcdf',
'frechet_lpdf',
'frechet_rng',
'gamma_cdf',
'gamma_lccdf',
'gamma_lcdf',
'gamma_lpdf',
'gamma_p',
'gamma_q',
'gamma_rng',
'gaussian_dlm_obs_lpdf',
'get_lp',
'gumbel_cdf',
'gumbel_lccdf',
'gumbel_lcdf',
'gumbel_lpdf',
'gumbel_rng',
'head',
'hypergeometric_lpmf',
'hypergeometric_rng',
'hypot',
'inc_beta',
'int_step',
'integrate_ode',
'integrate_ode_bdf',
'integrate_ode_rk45',
'inv',
'inv_chi_square_cdf',
'inv_chi_square_lccdf',
'inv_chi_square_lcdf',
'inv_chi_square_lpdf',
'inv_chi_square_rng',
'inv_cloglog',
'inv_gamma_cdf',
'inv_gamma_lccdf',
'inv_gamma_lcdf',
'inv_gamma_lpdf',
'inv_gamma_rng',
'inv_logit',
'inv_Phi',
'inv_sqrt',
'inv_square',
'inv_wishart_lpdf',
'inv_wishart_rng',
'inverse',
'inverse_spd',
'is_inf',
'is_nan',
'lbeta',
'lchoose',
'lgamma',
'lkj_corr_cholesky_lpdf',
'lkj_corr_cholesky_rng',
'lkj_corr_lpdf',
'lkj_corr_rng',
'lmgamma',
'lmultiply',
'log',
'log10',
'log1m',
'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
'log_diff_exp',
'log_falling_factorial',
'log_inv_logit',
'log_mix',
'log_rising_factorial',
'log_softmax',
'log_sum_exp',
'logistic_cdf',
'logistic_lccdf',
'logistic_lcdf',
'logistic_lpdf',
'logistic_rng',
'logit',
'lognormal_cdf',
'lognormal_lccdf',
'lognormal_lcdf',
'lognormal_lpdf',
'lognormal_rng',
'machine_precision',
'matrix_exp',
'max',
'mdivide_left_spd',
'mdivide_left_tri_low',
'mdivide_right_spd',
'mdivide_right_tri_low',
'mean',
'min',
'modified_bessel_first_kind',
'modified_bessel_second_kind',
'multi_gp_cholesky_lpdf',
'multi_gp_lpdf',
'multi_normal_cholesky_lpdf',
'multi_normal_cholesky_rng',
'multi_normal_lpdf',
'multi_normal_prec_lpdf',
'multi_normal_rng',
'multi_student_t_lpdf',
'multi_student_t_rng',
'multinomial_lpmf',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_2_cdf',
'neg_binomial_2_lccdf',
'neg_binomial_2_lcdf',
'neg_binomial_2_log_lpmf',
'neg_binomial_2_log_rng',
'neg_binomial_2_lpmf',
'neg_binomial_2_rng',
'neg_binomial_cdf',
'neg_binomial_lccdf',
'neg_binomial_lcdf',
'neg_binomial_lpmf',
'neg_binomial_rng',
'negative_infinity',
'normal_cdf',
'normal_lccdf',
'normal_lcdf',
'normal_lpdf',
'normal_rng',
'not_a_number',
'num_elements',
'ordered_logistic_lpmf',
'ordered_logistic_rng',
'owens_t',
'pareto_cdf',
'pareto_lccdf',
'pareto_lcdf',
'pareto_lpdf',
'pareto_rng',
'pareto_type_2_cdf',
'pareto_type_2_lccdf',
'pareto_type_2_lcdf',
'pareto_type_2_lpdf',
'pareto_type_2_rng',
'Phi',
'Phi_approx',
'pi',
'poisson_cdf',
'poisson_lccdf',
'poisson_lcdf',
'poisson_log_lpmf',
'poisson_log_rng',
'poisson_lpmf',
'poisson_rng',
'positive_infinity',
'pow',
'print',
'prod',
'qr_Q',
'qr_R',
'quad_form',
'quad_form_diag',
'quad_form_sym',
'rank',
'rayleigh_cdf',
'rayleigh_lccdf',
'rayleigh_lcdf',
'rayleigh_lpdf',
'rayleigh_rng',
'reject',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
'rising_factorial',
'round',
'row',
'rows',
'rows_dot_product',
'rows_dot_self',
'scaled_inv_chi_square_cdf',
'scaled_inv_chi_square_lccdf',
'scaled_inv_chi_square_lcdf',
'scaled_inv_chi_square_lpdf',
'scaled_inv_chi_square_rng',
'sd',
'segment',
'sin',
'singular_values',
'sinh',
'size',
'skew_normal_cdf',
'skew_normal_lccdf',
'skew_normal_lcdf',
'skew_normal_lpdf',
'skew_normal_rng',
'softmax',
'sort_asc',
'sort_desc',
'sort_indices_asc',
'sort_indices_desc',
'sqrt',
'sqrt2',
'square',
'squared_distance',
'step',
'student_t_cdf',
'student_t_lccdf',
'student_t_lcdf',
'student_t_lpdf',
'student_t_rng',
'sub_col',
'sub_row',
'sum',
'tail',
'tan',
'tanh',
'target',
'tcrossprod',
'tgamma',
'to_array_1d',
'to_array_2d',
'to_matrix',
'to_row_vector',
'to_vector',
'trace',
'trace_gen_quad_form',
'trace_quad_form',
'trigamma',
'trunc',
'uniform_cdf',
'uniform_lccdf',
'uniform_lcdf',
'uniform_lpdf',
'uniform_rng',
'variance',
'von_mises_lpdf',
'von_mises_rng',
'weibull_cdf',
'weibull_lccdf',
'weibull_lcdf',
'weibull_lpdf',
'weibull_rng',
'wiener_lpdf',
'wishart_lpdf',
'wishart_rng',
)
DISTRIBUTIONS = (
'bernoulli',
'bernoulli_logit',
'beta',
'beta_binomial',
'binomial',
'binomial_logit',
'categorical',
'categorical_logit',
'cauchy',
'chi_square',
'dirichlet',
'double_exponential',
'exp_mod_normal',
'exponential',
'frechet',
'gamma',
'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'lkj_corr_cholesky',
'logistic',
'lognormal',
'multi_gp',
'multi_gp_cholesky',
'multi_normal',
'multi_normal_cholesky',
'multi_normal_prec',
'multi_student_t',
'multinomial',
'neg_binomial',
'neg_binomial_2',
'neg_binomial_2_log',
'normal',
'ordered_logistic',
'pareto',
'pareto_type_2',
'poisson',
'poisson_log',
'rayleigh',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
'von_mises',
'weibull',
'wiener',
'wishart',
)
RESERVED = (
'alignas',
'alignof',
'and',
'and_eq',
'asm',
'auto',
'bitand',
'bitor',
'bool',
'break',
'case',
'catch',
'char',
'char16_t',
'char32_t',
'class',
'compl',
'const',
'const_cast',
'constexpr',
'continue',
'decltype',
'default',
'delete',
'do',
'double',
'dynamic_cast',
'else',
'enum',
'explicit',
'export',
'extern',
'false',
'float',
'for',
'friend',
'fvar',
'goto',
'if',
'in',
'inline',
'int',
'long',
'lp__',
'mutable',
'namespace',
'new',
'noexcept',
'not',
'not_eq',
'nullptr',
'operator',
'or',
'or_eq',
'private',
'protected',
'public',
'register',
'reinterpret_cast',
'repeat',
'return',
'short',
'signed',
'sizeof',
'STAN_MAJOR',
'STAN_MATH_MAJOR',
'STAN_MATH_MINOR',
'STAN_MATH_PATCH',
'STAN_MINOR',
'STAN_PATCH',
'static',
'static_assert',
'static_cast',
'struct',
'switch',
'template',
'then',
'this',
'thread_local',
'throw',
'true',
'try',
'typedef',
'typeid',
'typename',
'union',
'unsigned',
'until',
'using',
'var',
'virtual',
'void',
'volatile',
'wchar_t',
'while',
'xor',
'xor_eq',
)
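The word lists above are what StanLexer folds into its token rules. A minimal sketch of putting them to work (StanLexer's import path is taken from the lexer mapping earlier in this diff; the two-line model is illustrative only):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.modeling import StanLexer

    code = 'parameters { real mu; }\nmodel { mu ~ normal(0, 1); }\n'
    print(highlight(code, StanLexer(), TerminalFormatter()))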


@@ -0,0 +1,457 @@
"""
pygments.lexers._stata_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtins for Stata
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
builtins_special = (
"if", "in", "using", "replace", "by", "gen", "generate"
)
builtins_base = (
"if", "else", "in", "foreach", "for", "forv", "forva",
"forval", "forvalu", "forvalue", "forvalues", "by", "bys",
"bysort", "quietly", "qui", "about", "ac",
"ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath",
"adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova",
"anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app",
"appe", "appen", "append", "arch", "arch_dr", "arch_estat",
"arch_p", "archlm", "areg", "areg_p", "args", "arima",
"arima_dr", "arima_estat", "arima_p", "as", "asmprobit",
"asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg",
"asmprobit_p", "ass", "asse", "asser", "assert", "avplot",
"avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey",
"binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf",
"bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit",
"bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8",
"boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p",
"bprobit", "br", "break", "brier", "bro", "brow", "brows",
"browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w",
"bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8",
"bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot",
"camat", "canon", "canon_8", "canon_8_p", "canon_estat",
"canon_p", "cap", "caprojection", "capt", "captu", "captur",
"capture", "cat", "cc", "cchart", "cchart_7", "cci",
"cd", "censobs_table", "centile", "cf", "char", "chdir",
"checkdlgfiles", "checkestimationsample", "checkhlpfiles",
"checksum", "chelp", "ci", "cii", "cl", "class", "classutil",
"clear", "cli", "clis", "clist", "clo", "clog", "clog_lf",
"clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf",
"clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar",
"clslistarray", "cluster", "cluster_measures", "cluster_stop",
"cluster_tree", "cluster_tree_8", "clustermat", "cmdlog",
"cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg",
"codebook", "collaps4", "collapse", "colormult_nb",
"colormult_nw", "compare", "compress", "conf", "confi",
"confir", "confirm", "conren", "cons", "const", "constr",
"constra", "constrai", "constrain", "constraint", "continue",
"contract", "copy", "copyright", "copysource", "cor", "corc",
"corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc",
"corre", "correl", "correla", "correlat", "correlate",
"corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw",
"coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7",
"crc", "cret", "cretu", "cretur", "creturn", "cross", "cs",
"cscript", "cscript_log", "csi", "ct", "ct_is", "ctset",
"ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul",
"cusum", "cusum_7", "cutil", "d", "datasig", "datasign",
"datasigna", "datasignat", "datasignatu", "datasignatur",
"datasignature", "datetof", "db", "dbeta", "de", "dec",
"deco", "decod", "decode", "deff", "des", "desc", "descr",
"descri", "describ", "describe", "destring", "dfbeta",
"dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis",
"discard", "disp", "disp_res", "disp_s", "displ", "displa",
"display", "distinct", "do", "doe", "doed", "doedi",
"doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
"drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
"dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
"eivreg", "emdef", "end", "en", "enc", "enco", "encod", "encode",
"eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
"ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
"ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
"eretu", "eretur", "ereturn", "err", "erro", "error", "est",
"est_cfexist", "est_cfname", "est_clickable", "est_expand",
"est_hold", "est_table", "est_unhold", "est_unholdok",
"estat", "estat_default", "estat_summ", "estat_vce_only",
"esti", "estimates", "etodow", "etof", "etomdy", "ex",
"exi", "exit", "expand", "expandcl", "fac", "fact", "facto",
"factor", "factor_estat", "factor_p", "factor_pca_rotated",
"factor_rotate", "factormat", "fcast", "fcast_compute",
"fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri",
"fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause",
"fh_st", "open", "read", "close",
"file", "filefilter", "fillin", "find_hlp_file", "findfile",
"findit", "findit_7", "fit", "fl", "fli", "flis", "flist",
"for5_0", "form", "forma", "format", "fpredict", "frac_154",
"frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis",
"frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq",
"frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot",
"fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn",
"fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe",
"ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp",
"gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p",
"gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge",
"gen", "gene", "gener", "genera", "generat", "generate",
"genrank", "genstd", "genvmean", "gettoken", "gl", "gladder",
"gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04",
"glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09",
"glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu",
"glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1",
"glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6",
"glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo",
"glob", "globa", "global", "glogit", "glogit_8", "glogit_p",
"gmeans", "gnbre_lf", "gnbreg", "gnbreg_5", "gnbreg_p",
"gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet",
"gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf",
"gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen",
"gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr",
"gr7", "gr_copy", "gr_current", "gr_db", "gr_describe",
"gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit",
"gr_editviewopts", "gr_example", "gr_example2", "gr_export",
"gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename",
"gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table",
"gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen",
"greigen_7", "greigen_8", "grmeanby", "grmeanby_7",
"gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat",
"gsort", "gwood", "h", "hadimvo", "hareg", "hausman",
"haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf",
"heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf",
"hetpr_p", "hetprob", "hettest", "hexdump", "hilite",
"hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans",
"hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9",
"icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase",
"include", "inf", "infi", "infil", "infile", "infix", "inp",
"inpu", "input", "ins", "insheet", "insp", "inspe",
"inspec", "inspect", "integ", "inten", "intreg", "intreg_7",
"intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate",
"iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy",
"is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf",
"ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote",
"ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife",
"jacknife", "jknife", "jknife_6", "jknife_8", "jkstat",
"joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa",
"kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov",
"ktau", "kwallis", "l", "la", "lab", "labe", "label",
"labelbook", "ladder", "levels", "levelsof", "leverage",
"lfit", "lfit_p", "li", "lincom", "line", "linktest",
"lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp",
"lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw",
"llogis_p", "llogist", "llogistic", "llogistichet",
"lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet",
"lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp",
"lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0",
"loadingplot", "loc", "loca", "local", "log", "logi",
"logis_lf", "logistic", "logistic_p", "logit", "logit_estat",
"logit_p", "loglogs", "logrank", "loneway", "lookfor",
"lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc",
"lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x",
"lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot",
"lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns",
"man", "manova", "manova_estat", "manova_p", "manovatest",
"mantel", "mark", "markin", "markout", "marksample", "mat",
"mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata",
"mata_clear", "mata_describe", "mata_drop", "mata_matdescribe",
"mata_matsave", "mata_matuse", "mata_memory", "mata_mlib",
"mata_mosave", "mata_rename", "mata_which", "matalabel",
"matcproc", "matlist", "matname", "matr", "matri",
"matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci",
"md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds",
"mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat",
"mdsshepard", "mdytoe", "mdytof", "me_derd", "mean",
"means", "median", "memory", "memsize", "meqparse", "mer",
"merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound",
"mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir",
"mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs",
"ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug",
"ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp",
"ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle",
"ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0",
"ml_ebfr1", "ml_ebh0q", "ml_ebhh0", "ml_ebhr0", "ml_ebr0i",
"ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i",
"ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs",
"ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi",
"ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs",
"ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle",
"ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote",
"ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0",
"ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max",
"ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt",
"ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor",
"ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold",
"mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi",
"mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum",
"mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit",
"mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode",
"mvencode", "mvreg", "mvreg_estat", "n", "nbreg",
"nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net",
"newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9",
"nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2",
"nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7",
"nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3",
"nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit",
"nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no",
"nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note",
"notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc",
"old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit",
"ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway",
"op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr",
"opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit",
"oprobitp", "opts_exclusive", "order", "orthog", "orthpoly",
"ou", "out", "outf", "outfi", "outfil", "outfile", "outs",
"outsh", "outshe", "outshee", "outsheet", "ovtest", "pac",
"pac_7", "palette", "parse", "parse_dissim", "pause", "pca",
"pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate",
"pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr",
"pctile", "pentium", "pergram", "pergram_7", "permute",
"permute_8", "personal", "peto_st", "pkcollapse", "pkcross",
"pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm",
"pksumm_7", "pl", "plo", "plot", "plugin", "pnorm",
"pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p",
"poisson", "poisson_estat", "post", "postclose", "postfile",
"postutil", "pperron", "pr", "prais", "prais_e", "prais_e2",
"prais_p", "predict", "predictnl", "preserve", "print",
"pro", "prob", "probi", "probit", "probit_estat", "probit_p",
"proc_time", "procoverlay", "procrustes", "procrustes_estat",
"procrustes_p", "profiler", "prog", "progr", "progra",
"program", "prop", "proportion", "prtest", "prtesti", "pwcorr",
"pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder",
"qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg",
"qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile",
"quantile_7", "que", "quer", "query", "range", "ranksum",
"ratio", "rchart", "rchart_7", "rcof", "recast", "reclink",
"recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre",
"regre_p2", "regres", "regres_p", "regress", "regress_estat",
"regriv_p", "remap", "ren", "rena", "renam", "rename",
"renpfix", "repeat", "replace", "report", "reshape",
"restore", "ret", "retu", "retur", "return", "rm", "rmdir",
"robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf",
"rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7",
"roctab", "roctab_7", "rolling", "rologit", "rologit_p",
"rot", "rota", "rotat", "rotate", "rotatemat", "rreg",
"rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7",
"rvpplot", "rvpplot_7", "sa", "safesum", "sample",
"sampsi", "sav", "save", "savedresults", "saveold", "sc",
"sca", "scal", "scala", "scalar", "scatter", "scm_mine",
"sco", "scob_lf", "scob_p", "scobi_sw", "scobit", "scor",
"score", "scoreplot", "scoreplot_help", "scree", "screeplot",
"screeplot_help", "sdtest", "sdtesti", "se", "search",
"separate", "seperate", "serrbar", "serrbar_7", "serset", "set",
"set_defaults", "sfrancia", "sh", "she", "shel", "shell",
"shewhart", "shewhart_7", "signestimationsample", "signrank",
"signtest", "simul", "simul_7", "simulate", "simulate_8",
"sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth",
"snapspan", "so", "sor", "sort", "spearman", "spikeplot",
"spikeplot_7", "spikeplt", "spline_x", "split", "sqreg",
"sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st",
"st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys",
"st_note", "st_promo", "st_set", "st_show", "st_smpl",
"st_subid", "stack", "statsby", "statsby_8", "stbase", "stci",
"stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll",
"stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat",
"stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise",
"stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh",
"stphplot", "stphplot_7", "stphtest", "stphtest_7",
"stptime", "strate", "strate_7", "streg", "streg_sw", "streset",
"sts", "sts_7", "stset", "stsplit", "stsum", "sttocc",
"sttoct", "stvary", "stweib", "su", "suest", "suest_8",
"sum", "summ", "summa", "summar", "summari", "summariz",
"summarize", "sunflower", "sureg", "survcurv", "survsum",
"svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg",
"svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p",
"svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p",
"svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p",
"svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p",
"svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub",
"svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes",
"svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob",
"svyintreg", "svyintreg_7", "svyintrg", "svyivreg", "svylc",
"svylog_p", "svylogit", "svymarkout", "svymarkout_8",
"svymean", "svymlog", "svymlogit", "svynbreg", "svyolog",
"svyologit", "svyoprob", "svyoprobit", "svyopts",
"svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt",
"svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p",
"svyregress", "svyset", "svyset_7", "svyset_8", "svytab",
"svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg",
"swcox", "swereg", "swilk", "swlogis", "swlogit",
"swologit", "swoprbt", "swpois", "swprobit", "swqreg",
"swtobit", "swweib", "symmetry", "symmi", "symplot",
"symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse",
"szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd",
"tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds",
"tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat",
"tabulate", "te", "tempfile", "tempname", "tempvar", "tes",
"test", "testnl", "testparm", "teststd", "tetrachoric",
"time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p",
"tobit_sw", "token", "tokeni", "tokeniz", "tokenize",
"tostring", "total", "translate", "translator", "transmap",
"treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons",
"trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg",
"tsappend", "tset", "tsfill", "tsline", "tsline_ex",
"tsreport", "tsrevar", "tsrline", "tsset", "tssmooth",
"tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial",
"tw", "tware_st", "two", "twoway", "twoway__fpfit_serset",
"twoway__function_gen", "twoway__histogram_gen",
"twoway__ipoint_serset", "twoway__ipoints_serset",
"twoway__kdensity_gen", "twoway__lfit_serset",
"twoway__normgen_gen", "twoway__pci_serset",
"twoway__qfit_serset", "twoway__scatteri_serset",
"twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ",
"type", "typeof", "u", "unab", "unabbrev", "unabcmd",
"update", "us", "use", "uselabel", "var", "var_mkcompanion",
"var_p", "varbasic", "varfcast", "vargranger", "varirf",
"varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable",
"varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase",
"varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set",
"varirf_table", "varlist", "varlmar", "varnorm", "varsoc",
"varstable", "varstable_w", "varstable_w2", "varwle",
"vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w",
"vecirf_create", "veclmar", "veclmar_w", "vecnorm",
"vecnorm_w", "vecrank", "vecstable", "verinst", "vers",
"versi", "versio", "version", "view", "viewsource", "vif",
"vwls", "wdatetof", "webdescribe", "webseek", "webuse",
"weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf",
"weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh",
"weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa",
"weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p",
"weibull", "weibull_c", "weibull_s", "weibullhet",
"wh", "whelp", "whi", "which", "whil", "while", "wilc_st",
"wilcoxon", "win", "wind", "windo", "window", "winexec",
"wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7",
"xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave",
"xmluse", "xpose", "xsh", "xshe", "xshel", "xshell",
"xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p",
"xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2",
"xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr",
"xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee",
"xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p",
"xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman",
"xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg",
"xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1",
"xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit",
"xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p",
"xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p",
"xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p",
"xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson",
"xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred",
"xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p",
"xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p",
"xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be",
"xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re",
"xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti",
"xtsum", "xttab", "xttest0", "xttobit", "xttobit_8",
"xttobit_p", "xttrans", "yx", "yxview__barlike_draw",
"yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw",
"yxview_dropline_draw", "yxview_function_draw",
"yxview_iarrow_draw", "yxview_ilabels_draw",
"yxview_normal_draw", "yxview_pcarrow_draw",
"yxview_pcbarrow_draw", "yxview_pccapsym_draw",
"yxview_pcscatter_draw", "yxview_pcspike_draw",
"yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw",
"yxview_rcap_draw", "yxview_rcapsym_draw",
"yxview_rconnected_draw", "yxview_rline_draw",
"yxview_rscatter_draw", "yxview_rspike_draw",
"yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb",
"zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf",
"zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5",
"zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5",
"ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb",
"ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5",
"ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5"
)
builtins_functions = (
"abbrev", "abs", "acos", "acosh", "asin", "asinh", "atan",
"atan2", "atanh", "autocode", "betaden", "binomial",
"binomialp", "binomialtail", "binormal", "bofd",
"byteorder", "c", "_caller", "cauchy", "cauchyden",
"cauchytail", "Cdhms", "ceil", "char", "chi2", "chi2den",
"chi2tail", "Chms", "chop", "cholesky", "clip", "Clock",
"clock", "cloglog", "Cmdyhms", "Cofc", "cofC", "Cofd", "cofd",
"coleqnumb", "collatorlocale", "collatorversion",
"colnfreeparms", "colnumb", "colsof", "comb", "cond", "corr",
"cos", "cosh", "daily", "date", "day", "det", "dgammapda",
"dgammapdada", "dgammapdadx", "dgammapdx", "dgammapdxdx",
"dhms", "diag", "diag0cnt", "digamma", "dofb", "dofC", "dofc",
"dofh", "dofm", "dofq", "dofw", "dofy", "dow", "doy",
"dunnettprob", "e", "el", "esample", "epsdouble", "epsfloat",
"exp", "expm1", "exponential", "exponentialden",
"exponentialtail", "F", "Fden", "fileexists", "fileread",
"filereaderror", "filewrite", "float", "floor", "fmtwidth",
"frval", "_frval", "Ftail", "gammaden", "gammap", "gammaptail",
"get", "hadamard", "halfyear", "halfyearly", "has_eprop", "hh",
"hhC", "hms", "hofd", "hours", "hypergeometric",
"hypergeometricp", "I", "ibeta", "ibetatail", "igaussian",
"igaussianden", "igaussiantail", "indexnot", "inlist",
"inrange", "int", "inv", "invbinomial", "invbinomialtail",
"invcauchy", "invcauchytail", "invchi2", "invchi2tail",
"invcloglog", "invdunnettprob", "invexponential",
"invexponentialtail", "invF", "invFtail", "invgammap",
"invgammaptail", "invibeta", "invibetatail", "invigaussian",
"invigaussiantail", "invlaplace", "invlaplacetail",
"invlogisticp", "invlogisticsp", "invlogisticmsp",
"invlogistictailp", "invlogistictailsp", "invlogistictailmsp",
"invlogit", "invnbinomial", "invnbinomialtail", "invnchi2",
"invnchi2tail", "invnF", "invnFtail", "invnibeta",
"invnormal", "invnt", "invnttail", "invpoisson",
"invpoissontail", "invsym", "invt", "invttail", "invtukeyprob",
"invweibullabp", "invweibullabgp", "invweibullphabp",
"invweibullphabgp", "invweibullphtailabp",
"invweibullphtailabgp", "invweibulltailabp",
"invweibulltailabgp", "irecode", "issymmetric", "J", "laplace",
"laplaceden", "laplacetail", "ln", "ln1m", "ln1p", "lncauchyden",
"lnfactorial", "lngamma", "lnigammaden", "lnigaussianden",
"lniwishartden", "lnlaplaceden", "lnmvnormalden", "lnnormal",
"lnnormalden", "lnnormaldenxs", "lnnormaldenxms", "lnwishartden",
"log", "log10", "log1m", "log1p", "logisticx", "logisticsx",
"logisticmsx", "logisticdenx", "logisticdensx", "logisticdenmsx",
"logistictailx", "logistictailsx", "logistictailmsx", "logit",
"matmissing", "matrix", "matuniform", "max", "maxbyte",
"maxdouble", "maxfloat", "maxint", "maxlong", "mdy", "mdyhms",
"mi", "min", "minbyte", "mindouble", "minfloat", "minint",
"minlong", "minutes", "missing", "mm", "mmC", "mod", "mofd",
"month", "monthly", "mreldif", "msofhours", "msofminutes",
"msofseconds", "nbetaden", "nbinomial", "nbinomialp",
"nbinomialtail", "nchi2", "nchi2den", "nchi2tail", "nF",
"nFden", "nFtail", "nibeta", "normal", "normalden",
"normaldenxs", "normaldenxms", "npnchi2", "npnF", "npnt",
"nt", "ntden", "nttail", "nullmat", "plural", "plurals1",
"poisson", "poissonp", "poissontail", "qofd", "quarter",
"quarterly", "r", "rbeta", "rbinomial", "rcauchy", "rchi2",
"recode", "real", "regexm", "regexr", "regexs", "reldif",
"replay", "return", "rexponential", "rgamma", "rhypergeometric",
"rigaussian", "rlaplace", "rlogistic", "rlogistics",
"rlogisticms", "rnbinomial", "rnormal", "rnormalm", "rnormalms",
"round", "roweqnumb", "rownfreeparms", "rownumb", "rowsof",
"rpoisson", "rt", "runiform", "runiformab", "runiformint",
"rweibullab", "rweibullabg", "rweibullphab", "rweibullphabg",
"s", "scalar", "seconds", "sign", "sin", "sinh",
"smallestdouble", "soundex", "soundex_nara", "sqrt", "ss",
"ssC", "strcat", "strdup", "string", "stringns", "stritrim",
"strlen", "strlower", "strltrim", "strmatch", "strofreal",
"strofrealns", "strpos", "strproper", "strreverse", "strrpos",
"strrtrim", "strtoname", "strtrim", "strupper", "subinstr",
"subinword", "substr", "sum", "sweep", "t", "tan", "tanh",
"tC", "tc", "td", "tden", "th", "tin", "tm", "tobytes", "tq",
"trace", "trigamma", "trunc", "ttail", "tukeyprob", "tw",
"twithin", "uchar", "udstrlen", "udsubstr", "uisdigit",
"uisletter", "ustrcompare", "ustrfix", "ustrfrom",
"ustrinvalidcnt", "ustrleft", "ustrlen", "ustrlower",
"ustrltrim", "ustrnormalize", "ustrpos", "ustrregexm",
"ustrregexra", "ustrregexrf", "ustrregexs", "ustrreverse",
"ustrright", "ustrrpos", "ustrrtrim", "ustrsortkey",
"ustrtitle", "ustrto", "ustrtohex", "ustrtoname",
"ustrtrim", "ustrunescape", "ustrupper", "ustrword",
"ustrwordcount", "usubinstr", "usubstr", "vec", "vecdiag",
"week", "weekly", "weibullabx", "weibullabgx", "weibulldenabx",
"weibulldenabgx", "weibullphabx", "weibullphabgx",
"weibullphdenabx", "weibullphdenabgx", "weibullphtailabx",
"weibullphtailabgx", "weibulltailabx", "weibulltailabgx",
"wofd", "word", "wordbreaklocale", "wordcount",
"year", "yearly", "yh", "ym", "yofd", "yq", "yw"
)
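These tuples are plain data; the Stata lexer itself is not shown in this section, so the wiring below is an assumption based on the usual pygments pattern of feeding each tuple to the words() helper to build a single alternation rule. A minimal sketch with an illustrative three-entry stand-in tuple:

from pygments.lexer import RegexLexer, words
from pygments.token import Name, Text

# Stand-in for the full builtins_functions tuple above.
DEMO_BUILTINS = ("abs", "acos", "asin")

class MiniStataLexer(RegexLexer):
    """Toy lexer showing how a builtins tuple becomes one token rule."""
    name = 'MiniStata'
    tokens = {
        'root': [
            # words() compiles the tuple into one alternation regex;
            # the \b suffix keeps "abs" from matching inside "absolute".
            (words(DEMO_BUILTINS, suffix=r'\b'), Name.Function),
            (r'\s+', Text),
            (r'.', Text),
        ],
    }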

File diff suppressed because it is too large.


@ -0,0 +1,112 @@
"""
pygments.lexers._usd_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A collection of known USD-related keywords, attributes, and types.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
COMMON_ATTRIBUTES = [
"extent",
"xformOpOrder",
]
KEYWORDS = [
"class",
"clips",
"custom",
"customData",
"def",
"dictionary",
"inherits",
"over",
"payload",
"references",
"rel",
"subLayers",
"timeSamples",
"uniform",
"variantSet",
"variantSets",
"variants",
]
OPERATORS = [
"add",
"append",
"delete",
"prepend",
"reorder",
]
SPECIAL_NAMES = [
"active",
"apiSchemas",
"defaultPrim",
"elementSize",
"endTimeCode",
"hidden",
"instanceable",
"interpolation",
"kind",
"startTimeCode",
"upAxis",
]
TYPES = [
"asset",
"bool",
"color3d",
"color3f",
"color3h",
"color4d",
"color4f",
"color4h",
"double",
"double2",
"double3",
"double4",
"float",
"float2",
"float3",
"float4",
"frame4d",
"half",
"half2",
"half3",
"half4",
"int",
"int2",
"int3",
"int4",
"keyword",
"matrix2d",
"matrix3d",
"matrix4d",
"normal3d",
"normal3f",
"normal3h",
"point3d",
"point3f",
"point3h",
"quatd",
"quatf",
"quath",
"string",
"syn",
"token",
"uchar",
"uchar2",
"uchar3",
"uchar4",
"uint",
"uint2",
"uint3",
"uint4",
"usdaType",
"vector3d",
"vector3f",
"vector3h",
]


@ -0,0 +1,279 @@
"""
pygments.lexers._vbscript_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are manually translated lists from
http://www.indusoft.com/pdf/VBScript%20Reference.pdf.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = [
'ByRef',
'ByVal',
# dim: special rule
'call',
'case',
'class',
# const: special rule
'do',
'each',
'else',
'elseif',
'end',
'erase',
'execute',
'exit',
'for',
'function',
'GetRef',
'global',
'if',
'let',
'loop',
'next',
'new',
# option: special rule
'private',
'public',
'redim',
'select',
'set',
'sub',
'then',
'wend',
'while',
'with',
]
BUILTIN_FUNCTIONS = [
'Abs',
'Array',
'Asc',
'Atn',
'CBool',
'CByte',
'CCur',
'CDate',
'CDbl',
'Chr',
'CInt',
'CLng',
'Cos',
'CreateObject',
'CSng',
'CStr',
'Date',
'DateAdd',
'DateDiff',
'DatePart',
'DateSerial',
'DateValue',
'Day',
'Eval',
'Exp',
'Filter',
'Fix',
'FormatCurrency',
'FormatDateTime',
'FormatNumber',
'FormatPercent',
'GetObject',
'GetLocale',
'Hex',
'Hour',
'InStr',
'inStrRev',
'Int',
'IsArray',
'IsDate',
'IsEmpty',
'IsNull',
'IsNumeric',
'IsObject',
'Join',
'LBound',
'LCase',
'Left',
'Len',
'LoadPicture',
'Log',
'LTrim',
'Mid',
'Minute',
'Month',
'MonthName',
'MsgBox',
'Now',
'Oct',
'Randomize',
'RegExp',
'Replace',
'RGB',
'Right',
'Rnd',
'Round',
'RTrim',
'ScriptEngine',
'ScriptEngineBuildVersion',
'ScriptEngineMajorVersion',
'ScriptEngineMinorVersion',
'Second',
'SetLocale',
'Sgn',
'Space',
'Split',
'Sqr',
'StrComp',
'String',
'StrReverse',
'Tan',
'Time',
'Timer',
'TimeSerial',
'TimeValue',
'Trim',
'TypeName',
'UBound',
'UCase',
'VarType',
'Weekday',
'WeekdayName',
'Year',
]
BUILTIN_VARIABLES = [
'Debug',
'Dictionary',
'Drive',
'Drives',
'Err',
'File',
'Files',
'FileSystemObject',
'Folder',
'Folders',
'Match',
'Matches',
'RegExp',
'Submatches',
'TextStream',
]
OPERATORS = [
'+',
'-',
'*',
'/',
'\\',
'^',
'|',
'<',
'<=',
'>',
'>=',
'=',
'<>',
'&',
'$',
]
OPERATOR_WORDS = [
'mod',
'and',
'or',
'xor',
'eqv',
'imp',
'is',
'not',
]
BUILTIN_CONSTANTS = [
'False',
'True',
'vbAbort',
'vbAbortRetryIgnore',
'vbApplicationModal',
'vbArray',
'vbBinaryCompare',
'vbBlack',
'vbBlue',
'vbBoole',
'vbByte',
'vbCancel',
'vbCr',
'vbCritical',
'vbCrLf',
'vbCurrency',
'vbCyan',
'vbDataObject',
'vbDate',
'vbDefaultButton1',
'vbDefaultButton2',
'vbDefaultButton3',
'vbDefaultButton4',
'vbDouble',
'vbEmpty',
'vbError',
'vbExclamation',
'vbFalse',
'vbFirstFullWeek',
'vbFirstJan1',
'vbFormFeed',
'vbFriday',
'vbGeneralDate',
'vbGreen',
'vbIgnore',
'vbInformation',
'vbInteger',
'vbLf',
'vbLong',
'vbLongDate',
'vbLongTime',
'vbMagenta',
'vbMonday',
'vbMsgBoxHelpButton',
'vbMsgBoxRight',
'vbMsgBoxRtlReading',
'vbMsgBoxSetForeground',
'vbNewLine',
'vbNo',
'vbNull',
'vbNullChar',
'vbNullString',
'vbObject',
'vbObjectError',
'vbOK',
'vbOKCancel',
'vbOKOnly',
'vbQuestion',
'vbRed',
'vbRetry',
'vbRetryCancel',
'vbSaturday',
'vbShortDate',
'vbShortTime',
'vbSingle',
'vbString',
'vbSunday',
'vbSystemModal',
'vbTab',
'vbTextCompare',
'vbThursday',
'vbTrue',
'vbTuesday',
'vbUseDefault',
'vbUseSystem',
'vbVariant',
'vbVerticalTab',
'vbWednesday',
'vbWhite',
'vbYellow',
'vbYes',
'vbYesNo',
'vbYesNoCancel',
]

File diff suppressed because it is too large.


@ -0,0 +1,244 @@
"""
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
.. versionadded:: 0.9
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\[^\\]|[^/\\\n])*/[gim]*', String.Regex),
(r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(words((
'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
'switch'), suffix=r'\b'),
Keyword),
(words((
'class', 'public', 'final', 'internal', 'native', 'override', 'private',
'protected', 'static', 'import', 'extends', 'implements', 'interface',
'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
'namespace', 'package', 'set'), suffix=r'\b'),
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(words((
'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
'ConvolutionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
'DeleteObjectSample', 'Dictionary', 'DisplacementMapFilter', 'DisplayObject',
'DisplacementMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariables', 'VerifyError',
'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
'XMLUI'), suffix=r'\b'),
Name.Builtin),
(words((
'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
'unescape'), suffix=r'\b'),
Name.Function),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
def analyse_text(text):
"""This is only used to disambiguate between ActionScript and
ActionScript3. We return 0 here; the ActionScript3 lexer will match
AS3 variable definitions and that will hopefully suffice."""
return 0
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
.. versionadded:: 0.11
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
identifier = r'[$a-zA-Z_]\w*'
typeidentifier = identifier + r'(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\[^\\]|[^\\\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s+', Text, '#pop:2'),
default('#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',', Operator, '#pop'),
default('#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text):
return 0.3
return 0
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
.. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
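A quick usage sketch (assumed usage, not part of this commit): pushing a one-line AS3 snippet through the lexer defined above via pygments' standard highlight() entry point. The same call works with ActionScriptLexer or MxmlLexer; only the lexer instance changes.

from pygments import highlight
from pygments.formatters import TerminalFormatter

code = 'var greeting:String = "hello";'
print(highlight(code, ActionScript3Lexer(), TerminalFormatter()))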


@ -0,0 +1,23 @@
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from pygments.lexers.perl import PerlLexer, Perl6Lexer
from pygments.lexers.d import CrocLexer, MiniDLexer
from pygments.lexers.iolang import IoLexer
from pygments.lexers.tcl import TclLexer
from pygments.lexers.factor import FactorLexer
from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
__all__ = []


@ -0,0 +1,239 @@
"""
pygments.lexers.algebra
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer algebra systems.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer', 'BCLexer']
class GAPLexer(RegexLexer):
"""
For `GAP <http://www.gap-system.org>`_ source code.
.. versionadded:: 2.0
"""
name = 'GAP'
aliases = ['gap']
filenames = ['*.g', '*.gd', '*.gi', '*.gap']
tokens = {
'root': [
(r'#.*$', Comment.Single),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
if|then|elif|else|fi|
for|while|do|od|
repeat|until|
break|continue|
function|local|return|end|
rec|
quit|QUIT|
IsBound|Unbind|
TryNextMethod|
Info|Assert
)\b''', Keyword),
(r'''(?x)\b(?:
true|false|fail|infinity
)\b''',
Name.Constant),
(r'''(?x)\b(?:
(Declare|Install)([A-Z][A-Za-z]+)|
BindGlobal|BIND_GLOBAL
)\b''',
Name.Builtin),
(r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
(r'''(?x)\b(?:
and|or|not|mod|in
)\b''',
Operator.Word),
(r'''(?x)
(?:\w+|`[^`]*`)
(?:::\w+|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
}
def analyse_text(text):
score = 0.0
# Declaration part
if re.search(
r"(InstallTrueMethod|Declare(Attribute|Category|Filter|Operation" +
r"|GlobalFunction|Synonym|SynonymAttr|Property))", text
):
score += 0.7
# Implementation part
if re.search(
r"(DeclareRepresentation|Install(GlobalFunction|Method|" +
r"ImmediateMethod|OtherMethod)|New(Family|Type)|Objectify)", text
):
score += 0.7
return min(score, 1.0)
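# NOTE (illustrative aside, not part of the vendored lexer): analyse_text
# returns a score in [0, 1] that pygments.lexers.guess_lexer uses to rank
# candidate lexers when no filename is available. A hedged example; other
# lexers may outscore GAP on different input:
#
#     from pygments.lexers import guess_lexer
#     sample = 'InstallGlobalFunction( Double, function(x) return 2 * x; end );'
#     print(guess_lexer(sample).name)  # expected: 'GAP', via the 0.7 score above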
class MathematicaLexer(RegexLexer):
"""
Lexer for `Mathematica <http://www.wolfram.com/mathematica/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Mathematica'
aliases = ['mathematica', 'mma', 'nb']
filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
mimetypes = ['application/mathematica',
'application/vnd.wolfram.mathematica',
'application/vnd.wolfram.mathematica.package',
'application/vnd.wolfram.cdf']
# http://reference.wolfram.com/mathematica/guide/Syntax.html
operators = (
";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
"^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
"@@@", "~~", "===", "&", "<", ">", "<=", ">=",
)
punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
def _multi_escape(entries):
return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))
tokens = {
'root': [
(r'(?s)\(\*.*?\*\)', Comment),
(r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
(r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
(r'#\d*', Name.Variable),
(r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
(r'-?\d+\.\d*', Number.Float),
(r'-?\d*\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(words(operators), Operator),
(words(punctuation), Punctuation),
(r'".*?"', String),
(r'\s+', Text.Whitespace),
],
}
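# NOTE (illustrative aside, not part of the vendored lexer): Python
# concatenates adjacent string literals, so a missing comma in a tuple like
# `operators` above silently merges two operators into one bogus entry and no
# error is raised. A quick check makes the failure mode visible:
#
#     broken = (";;", "=", "=.", "!=" "==", ":=")   # note the missing comma
#     print(broken)       # (';;', '=', '=.', '!===', ':=')
#     print(len(broken))  # 5 entries instead of the intended 6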
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
.. versionadded:: 0.8
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
# (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class BCLexer(RegexLexer):
"""
A `BC <https://www.gnu.org/software/bc/>`_ lexer.
.. versionadded:: 2.1
"""
name = 'BC'
aliases = ['bc']
filenames = ['*.bc']
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'[{}();,]', Punctuation),
(words(('if', 'else', 'while', 'for', 'break', 'continue',
'halt', 'return', 'define', 'auto', 'print', 'read',
'length', 'scale', 'sqrt', 'limits', 'quit',
'warranty'), suffix=r'\b'), Keyword),
(r'\+\+|--|\|\||&&|'
r'([-<>+*%\^/!=])=?', Operator),
# bc doesn't support exponential
(r'[0-9]+(\.[0-9]*)?', Number),
(r'\.[0-9]+', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}


@ -0,0 +1,75 @@
"""
pygments.lexers.ambient
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for AmbientTalk language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['AmbientTalkLexer']
class AmbientTalkLexer(RegexLexer):
"""
Lexer for `AmbientTalk <https://code.google.com/p/ambienttalk>`_ source code.
.. versionadded:: 2.0
"""
name = 'AmbientTalk'
filenames = ['*.at']
aliases = ['at', 'ambienttalk', 'ambienttalk/2']
mimetypes = ['text/x-ambienttalk']
flags = re.MULTILINE | re.DOTALL
builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
'mirroredBy:', 'is:'))
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(def|deftype|import|alias|exclude)\b', Keyword),
(builtin, Name.Builtin),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'\|', Punctuation, 'arglist'),
(r'<:|[*^!%&<>+=,./?-]|:=', Operator),
(r"`[a-zA-Z_]\w*", String.Symbol),
(r"[a-zA-Z_]\w*:", Name.Function),
(r"[{}()\[\];`]", Punctuation),
(r'(self|super)\b', Name.Variable.Instance),
(r"[a-zA-Z_]\w*", Name.Variable),
(r"@[a-zA-Z_]\w*", Name.Class),
(r"@\[", Name.Class, 'annotations'),
include('numbers'),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'namespace': [
(r'[a-zA-Z_]\w*\.', Name.Namespace),
(r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
(r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
],
'annotations': [
(r"(.*?)\]", Name.Class, '#pop')
],
'arglist': [
(r'\|', Punctuation, '#pop'),
(r'\s*(,)\s*', Punctuation),
(r'[a-zA-Z_]\w*', Name.Variable),
],
}


@ -0,0 +1,48 @@
"""
pygments.lexers.amdgpu
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the AMDGPU ISA assembly.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Name, Text, Keyword, Whitespace, Number, Comment
import re
__all__ = ['AMDGPULexer']
class AMDGPULexer(RegexLexer):
"""
For AMD GPU assembly.
.. versionadded:: 2.8
"""
name = 'AMDGPU'
aliases = ['amdgpu']
filenames = ['*.isa']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'[\r\n]+', Text),
(r'(([a-z_0-9])*:([a-z_0-9])*)', Name.Attribute),
(r'(\[|\]|\(|\)|,|\:|\&)', Text),
(r'([;#]|//).*?\n', Comment.Single),
(r'((s_)?(ds|buffer|flat|image)_[a-z0-9_]+)', Keyword.Reserved),
(r'(_lo|_hi)', Name.Variable),
(r'(vmcnt|lgkmcnt|expcnt|lit|unorm|glc)', Name.Attribute),
(r'(label_[a-z0-9]+)', Keyword),
(r'(_L[0-9]*)', Name.Variable),
(r'(s|v)_[a-z0-9_]+', Keyword),
(r'(v[0-9.]+|vcc|exec|v)', Name.Variable),
(r's[0-9.]+|s', Name.Variable),
(r'[0-9]+\.[^0-9]+', Number.Float),
(r'(0[xX][a-z0-9]+)|([0-9]+)', Number.Integer)
]
}


@ -0,0 +1,86 @@
"""
pygments.lexers.ampl
~~~~~~~~~~~~~~~~~~~~
Lexers for the AMPL language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['AmplLexer']
class AmplLexer(RegexLexer):
"""
For `AMPL <http://ampl.com/>`_ source code.
.. versionadded:: 2.2
"""
name = 'Ampl'
aliases = ['ampl']
filenames = ['*.run']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text.Whitespace),
(r'#.*?\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(words((
'call', 'cd', 'close', 'commands', 'data', 'delete', 'display',
'drop', 'end', 'environ', 'exit', 'expand', 'include', 'load',
'model', 'objective', 'option', 'problem', 'purge', 'quit',
'redeclare', 'reload', 'remove', 'reset', 'restore', 'shell',
'show', 'solexpand', 'solution', 'solve', 'update', 'unload',
'xref', 'coeff', 'coef', 'cover', 'obj', 'interval', 'default',
'from', 'to', 'to_come', 'net_in', 'net_out', 'dimen',
'dimension', 'check', 'complements', 'write', 'function',
'pipe', 'format', 'if', 'then', 'else', 'in', 'while', 'repeat',
'for'), suffix=r'\b'), Keyword.Reserved),
(r'(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)',
Keyword.Type),
(r'\".*?\"', String.Double),
(r'\'.*?\'', String.Single),
(r'[()\[\]{},;:]+', Punctuation),
(r'\b(\w+)(\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|'
r'lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|'
r'ub|urc|uslack|val)',
bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
(r'(set|param|var|arc|minimize|maximize|subject to|s\.t\.|subj to|'
r'node|table|suffix|read table|write table)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Name.Variable)),
(r'(param)(\s*)(:)(\s*)(\w+)(\s*)(:)(\s*)((\w|\s)+)',
bygroups(Keyword.Declaration, Text, Punctuation, Text,
Name.Variable, Text, Punctuation, Text, Name.Variable)),
(r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)',
bygroups(Keyword.Declaration, Text, using(this), Text, Name.Variable)),
(words((
'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2',
'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10',
'max', 'min', 'precision', 'round', 'sin', 'sinh', 'sqrt', 'tan',
'tanh', 'time', 'trunc', 'Beta', 'Cauchy', 'Exponential', 'Gamma',
'Irand224', 'Normal', 'Normal01', 'Poisson', 'Uniform', 'Uniform01',
'num', 'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
'match', 'sub', 'gsub', 'print', 'printf', 'next', 'nextw', 'prev',
'prevw', 'first', 'last', 'ord', 'ord0', 'card', 'arity',
'indexarity'), prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'(\+|\-|\*|/|\*\*|=|<=|>=|==|\||\^|<|>|\!|\.\.|:=|\&|\!=|<<|>>)',
Operator),
(words((
'or', 'exists', 'forall', 'and', 'in', 'not', 'within', 'union',
'diff', 'difference', 'symdiff', 'inter', 'intersect',
'intersection', 'cross', 'setof', 'by', 'less', 'sum', 'prod',
'product', 'div', 'mod'), suffix=r'\b'),
Keyword.Reserved), # Operator.Name but not enough emphasized with that
(r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
(r'\d+([eE][+-]?\d+)?', Number.Integer),
(r'[+-]?Infinity', Number.Integer),
(r'(\w+|(\.(?!\.)))', Text)
]
}

libs/pygments/lexers/apl.py Normal file

@ -0,0 +1,100 @@
"""
pygments.lexers.apl
~~~~~~~~~~~~~~~~~~~
Lexers for APL.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['APLLexer']
class APLLexer(RegexLexer):
"""
A simple `APL <https://en.m.wikipedia.org/wiki/APL_(programming_language)>`_ lexer.
.. versionadded:: 2.0
"""
name = 'APL'
aliases = ['apl']
filenames = ['*.apl']
tokens = {
'root': [
# Whitespace
# ==========
(r'\s+', Text),
#
# Comment
# =======
# '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
(r'[⍝#].*$', Comment.Single),
#
# Strings
# =======
(r'\'((\'\')|[^\'])*\'', String.Single),
(r'"(("")|[^"])*"', String.Double), # supported by NGN APL
#
# Punctuation
# ===========
# This token type is used for diamond and parenthesis
# but not for bracket and ; (see below)
(r'[⋄◇()]', Punctuation),
#
# Array indexing
# ==============
# Since this token type is very important in APL, it is not included in
# the punctuation token type but rather in the following one
(r'[\[\];]', String.Regex),
#
# Distinguished names
# ===================
# following IBM APL2 standard
(r'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
#
# Labels
# ======
# following IBM APL2 standard
# (r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
#
# Variables
# =========
# following IBM APL2 standard
(r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
#
# Numbers
# =======
(r'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
r'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
Number),
#
# Operators
# ==========
(r'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘⌸&⌶@⌺⍥⍛⍢]', Name.Attribute), # closest token type
(r'[+\-×÷⌈⌊∣|?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗⊆⊇⍸√⌾…⍮]',
Operator),
#
# Constant
# ========
(r'⍬', Name.Constant),
#
# Quad symbol
# ===========
(r'[⎕⍞]', Name.Variable.Global),
#
# Arrows left/right
# =================
(r'[←→]', Keyword.Declaration),
#
# D-Fn
# ====
(r'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
(r'[{}]', Keyword.Type),
],
}
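A sanity check (assumed usage, not part of this commit): streaming tokens for a tiny APL expression to confirm the non-ASCII token classes above are reachable.

apl_line = 'x ← 1 2 3 ⍝ a vector'
for tok_type, value in APLLexer().get_tokens(apl_line):
    print(tok_type, repr(value))
# '←' should surface as Keyword.Declaration, '⍝ a vector' as Comment.Single.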


@ -0,0 +1,317 @@
"""
pygments.lexers.archetype
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Archetype-related syntaxes, including:
- ODIN syntax <https://github.com/openEHR/odin>
- ADL syntax <http://www.openehr.org/releases/trunk/architecture/am/adl2.pdf>
- cADL sub-syntax of ADL
For uses of this syntax, see the openEHR archetypes <http://www.openEHR.org/ckm>
Contributed by Thomas Beale <https://github.com/wolandscat>,
<https://bitbucket.org/thomas_beale>.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Name, Literal, Number, String, \
Punctuation, Keyword, Operator, Generic
__all__ = ['OdinLexer', 'CadlLexer', 'AdlLexer']
class AtomsLexer(RegexLexer):
"""
Lexer for Values used in ADL and ODIN.
.. versionadded:: 2.1
"""
tokens = {
# ----- pseudo-states for inclusion -----
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'[ \t]*--.*$', Comment),
],
'archetype_id': [
(r'[ \t]*([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}'
r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?', Name.Decorator),
],
'date_constraints': [
# ISO 8601-based date/time constraints
(r'[Xx?YyMmDdHhSs\d]{2,4}([:-][Xx?YyMmDdHhSs\d]{2}){2}', Literal.Date),
# ISO 8601-based duration constraints + optional trailing slash
(r'(P[YyMmWwDd]+(T[HhMmSs]+)?|PT[HhMmSs]+)/?', Literal.Date),
],
'ordered_values': [
# ISO 8601 date with optional 'T' ligature
(r'\d{4}-\d{2}-\d{2}T?', Literal.Date),
# ISO 8601 time
(r'\d{2}:\d{2}:\d{2}(\.\d+)?([+-]\d{4}|Z)?', Literal.Date),
# ISO 8601 duration
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
'values': [
include('ordered_values'),
(r'([Tt]rue|[Ff]alse)', Literal),
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'[a-z][a-z0-9+.-]*:', Literal, 'uri'),
# term code
(r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)(\w[\w-]*)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation, Name.Decorator,
Punctuation)),
(r'\|', Punctuation, 'interval'),
# list continuation
(r'\.\.\.', Punctuation),
],
'constraint_values': [
(r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)',
bygroups(Punctuation, Name.Decorator, Punctuation), 'adl14_code_constraint'),
# ADL 1.4 ordinal constraint
(r'(\d*)(\|)(\[\w[\w-]*::\w[\w-]*\])((?:[,;])?)',
bygroups(Number, Punctuation, Name.Decorator, Punctuation)),
include('date_constraints'),
include('values'),
],
# ----- real states -----
'string': [
('"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
# all other characters
(r'[^\\"]+', String),
# stray backslash
(r'\\', String),
],
'uri': [
# effective URI terminators
(r'[,>\s]', Punctuation, '#pop'),
(r'[^>\s,]+', Literal),
],
'interval': [
(r'\|', Punctuation, '#pop'),
include('ordered_values'),
(r'\.\.', Punctuation),
(r'[<>=] *', Punctuation),
# handle +/-
(r'\+/-', Punctuation),
(r'\s+', Text),
],
'any_code': [
include('archetype_id'),
# if it is a code
(r'[a-z_]\w*[0-9.]+(@[^\]]+)?', Name.Decorator),
# if it is tuple with attribute names
(r'[a-z_]\w*', Name.Class),
# if it is an integer, i.e. Xpath child index
(r'[0-9]+', Text),
(r'\|', Punctuation, 'code_rubric'),
(r'\]', Punctuation, '#pop'),
# handle use_archetype statement
(r'\s*,\s*', Punctuation),
],
'code_rubric': [
(r'\|', Punctuation, '#pop'),
(r'[^|]+', String),
],
'adl14_code_constraint': [
(r'\]', Punctuation, '#pop'),
(r'\|', Punctuation, 'code_rubric'),
(r'(\w[\w-]*)([;,]?)', bygroups(Name.Decorator, Punctuation)),
include('whitespace'),
],
}
class OdinLexer(AtomsLexer):
"""
Lexer for ODIN syntax.
.. versionadded:: 2.1
"""
name = 'ODIN'
aliases = ['odin']
filenames = ['*.odin']
mimetypes = ['text/odin']
tokens = {
'path': [
(r'>', Punctuation, '#pop'),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'/', Punctuation),
(r'\[', Punctuation, 'key'),
(r'\s*,\s*', Punctuation, '#pop'),
(r'\s+', Text, '#pop'),
],
'key': [
include('values'),
(r'\]', Punctuation, '#pop'),
],
'type_cast': [
(r'\)', Punctuation, '#pop'),
(r'[^)]+', Name.Class),
],
'root': [
include('whitespace'),
(r'([Tt]rue|[Ff]alse)', Literal),
include('values'),
# x-ref path
(r'/', Punctuation, 'path'),
# x-ref path starting with key
(r'\[', Punctuation, 'key'),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'=', Operator),
(r'\(', Punctuation, 'type_cast'),
(r',', Punctuation),
(r'<', Punctuation),
(r'>', Punctuation),
(r';', Punctuation),
],
}
class CadlLexer(AtomsLexer):
"""
Lexer for cADL syntax.
.. versionadded:: 2.1
"""
name = 'cADL'
aliases = ['cadl']
filenames = ['*.cadl']
tokens = {
'path': [
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'/', Punctuation),
(r'\[', Punctuation, 'any_code'),
(r'\s+', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
(r'(cardinality|existence|occurrences|group|include|exclude|'
r'allow_archetype|use_archetype|use_node)\W', Keyword.Type),
(r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type),
(r'(after|before|closed)\W', Keyword.Type),
(r'(not)\W', Operator),
(r'(matches|is_in)\W', Operator),
# is_in / not is_in char
('(\u2208|\u2209)', Operator),
# there_exists / not there_exists / for_all / and / or
('(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\u223C)',
Operator),
# regex in slot or as string constraint
(r'(\{)(\s*/[^}]+/\s*)(\})',
bygroups(Punctuation, String.Regex, Punctuation)),
# regex in slot or as string constraint
(r'(\{)(\s*\^[^}]+\^\s*)(\})',
bygroups(Punctuation, String.Regex, Punctuation)),
(r'/', Punctuation, 'path'),
# for cardinality etc
(r'(\{)((?:\d+\.\.)?(?:\d+|\*))'
r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})',
bygroups(Punctuation, Number, Number, Punctuation)),
# [{ is start of a tuple value
(r'\[\{', Punctuation),
(r'\}\]', Punctuation),
(r'\{', Punctuation),
(r'\}', Punctuation),
include('constraint_values'),
# type name
(r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'\[', Punctuation, 'any_code'),
(r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator),
(r'\(', Punctuation),
(r'\)', Punctuation),
# for lists of values
(r',', Punctuation),
(r'"', String, 'string'),
# for assumed value
(r';', Punctuation),
],
}
class AdlLexer(AtomsLexer):
"""
Lexer for ADL syntax.
.. versionadded:: 2.1
"""
name = 'ADL'
aliases = ['adl']
filenames = ['*.adl', '*.adls', '*.adlf', '*.adlx']
tokens = {
'whitespace': [
# blank line ends
(r'\s*\n', Text),
# comment-only line
(r'^[ \t]*--.*$', Comment),
],
'odin_section': [
# repeating the following two rules from the root state enables multi-line
# strings that start in the first column to be dealt with
(r'^(language|description|ontology|terminology|annotations|'
r'component_terminologies|revision_history)[ \t]*\n', Generic.Heading),
(r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
(r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)),
(r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)),
# template overlay delimiter
(r'^----------*\n', Text, '#pop'),
(r'^.*\n', String),
default('#pop'),
],
'cadl_section': [
(r'^([ \t]*|[ \t]+.*)\n', using(CadlLexer)),
default('#pop'),
],
'rules_section': [
(r'^[ \t]+.*\n', using(CadlLexer)),
default('#pop'),
],
'metadata': [
(r'\)', Punctuation, '#pop'),
(r';', Punctuation),
(r'([Tt]rue|[Ff]alse)', Literal),
# numbers and version ids
(r'\d+(\.\d+)*', Literal),
# Guids
(r'(\d|[a-fA-F])+(-(\d|[a-fA-F])+){3,}', Literal),
(r'\w+', Name.Class),
(r'"', String, 'string'),
(r'=', Operator),
(r'[ \t]+', Text),
default('#pop'),
],
'root': [
(r'^(archetype|template_overlay|operational_template|template|'
r'speciali[sz]e)', Generic.Heading),
(r'^(language|description|ontology|terminology|annotations|'
r'component_terminologies|revision_history)[ \t]*\n',
Generic.Heading, 'odin_section'),
(r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
(r'^(rules)[ \t]*\n', Generic.Heading, 'rules_section'),
include('archetype_id'),
(r'[ \t]*\(', Punctuation, 'metadata'),
include('whitespace'),
],
}
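Assuming the usual alias registration in pygments' lexer map (that mapping file is not visible in this diff), all three classes resolve by the aliases declared above:

from pygments.lexers import get_lexer_by_name

for alias in ('odin', 'cadl', 'adl'):
    print(alias, '->', get_lexer_by_name(alias).name)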


@ -0,0 +1,116 @@
"""
pygments.lexers.arrow
~~~~~~~~~~~~~~~~~~~~~
Lexer for Arrow.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, include
from pygments.token import Text, Operator, Keyword, Punctuation, Name, \
String, Number
__all__ = ['ArrowLexer']
TYPES = r'\b(int|bool|char)((?:\[\])*)(?=\s+)'
IDENT = r'([a-zA-Z_][a-zA-Z0-9_]*)'
DECL = TYPES + r'(\s+)' + IDENT
class ArrowLexer(RegexLexer):
"""
Lexer for Arrow: https://pypi.org/project/py-arrow-lang/
.. versionadded:: 2.7
"""
name = 'Arrow'
aliases = ['arrow']
filenames = ['*.arw']
tokens = {
'root': [
(r'\s+', Text),
(r'^[|\s]+', Punctuation),
include('blocks'),
include('statements'),
include('expressions'),
],
'blocks': [
(r'(function)(\n+)(/-->)(\s*)' +
DECL + # 4 groups
r'(\()', bygroups(
Keyword.Reserved, Text, Punctuation,
Text, Keyword.Type, Punctuation, Text,
Name.Function, Punctuation
), 'fparams'),
(r'/-->$|\\-->$|/--<|\\--<|\^', Punctuation),
],
'statements': [
(DECL, bygroups(Keyword.Type, Punctuation, Text, Name.Variable)),
(r'\[', Punctuation, 'index'),
(r'=', Operator),
(r'require|main', Keyword.Reserved),
(r'print', Keyword.Reserved, 'print'),
],
'expressions': [
(r'\s+', Text),
(r'[0-9]+', Number.Integer),
(r'true|false', Keyword.Constant),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'\{', Punctuation, 'array'),
(r'==|!=|<|>|\+|-|\*|/|%', Operator),
(r'and|or|not|length', Operator.Word),
(r'(input)(\s+)(int|char\[\])', bygroups(
Keyword.Reserved, Text, Keyword.Type
)),
(IDENT + r'(\()', bygroups(
Name.Function, Punctuation
), 'fargs'),
(IDENT, Name.Variable),
(r'\[', Punctuation, 'index'),
(r'\(', Punctuation, 'expressions'),
(r'\)', Punctuation, '#pop'),
],
'print': [
include('expressions'),
(r',', Punctuation),
default('#pop'),
],
'fparams': [
(DECL, bygroups(Keyword.Type, Punctuation, Text, Name.Variable)),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
],
'escape': [
(r'\\(["\\/abfnrtv]|[0-9]{1,3}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4})',
String.Escape),
],
'char': [
(r"'", String.Char, '#pop'),
include('escape'),
(r"[^'\\]", String.Char),
],
'string': [
(r'"', String.Double, '#pop'),
include('escape'),
(r'[^"\\]+', String.Double),
],
'array': [
include('expressions'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
],
'fargs': [
include('expressions'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
],
'index': [
include('expressions'),
(r'\]', Punctuation, '#pop'),
],
}

libs/pygments/lexers/asm.py Normal file

File diff suppressed because it is too large.


@ -0,0 +1,379 @@
"""
pygments.lexers.automation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for automation scripting languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, combined
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Punctuation, Generic
__all__ = ['AutohotkeyLexer', 'AutoItLexer']
class AutohotkeyLexer(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
.. versionadded:: 1.4
"""
name = 'autohotkey'
aliases = ['ahk', 'autohotkey']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
tokens = {
'root': [
(r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
(r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
(r'\s+;.*?$', Comment.Single),
(r'^;.*?$', Comment.Single),
(r'[]{}(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInVariables'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
include('garbage'),
],
'incomment': [
(r'^\s*\*/', Comment.Multiline, '#pop'),
(r'[^*/]', Comment.Multiline),
(r'[*/]', Comment.Multiline)
],
'incontinuation': [
(r'^\s*\)', Generic, '#pop'),
(r'[^)]', Generic),
(r'[)]', Generic),
],
'commands': [
(r'(?i)^(\s*)(global|local|static|'
r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
r'SetBatchLines|SetCapslockState|SetControlDelay|'
r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
r'WinWait)\b', bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
Name.Function),
],
'builtInVariables': [
(r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
r'False|A_IsUnicode|A_FileEncoding|A_PtrSize)\b',
Name.Variable),
],
'labels': [
# hotkeys and labels
# technically, hotkey names are limited to named keys and buttons
(r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
(r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
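# 'dqs' is the double-quoted-string state; it is presumably entered from
# the root state via combined('stringescape', 'dqs'), as the AutoIt lexer
# further down does at its '"' rule, and pops on the closing quote.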
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
# (r'.', Text), # no cheating
],
}
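# A minimal usage sketch (illustrative; assumes the class above is the
# AutohotkeyLexer that Pygments registers under that name):
#
#     from pygments import highlight
#     from pygments.lexers import AutohotkeyLexer
#     from pygments.formatters import TerminalFormatter
#
#     print(highlight('MsgBox, Hello %A_UserName%',
#                     AutohotkeyLexer(), TerminalFormatter()))
#
# 'MsgBox' should be matched by the commands table above and 'A_UserName'
# by the builtInVariables table.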
class AutoItLexer(RegexLexer):
"""
For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files.
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting.
.. versionadded:: 1.6
"""
name = 'AutoIt'
aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
# Keywords, functions, macros from au3.keywords.properties
# which can be found in AutoIt installed directory, e.g.
# c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while""".split()
functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()
tokens = {
'root': [
(r';.*\n', Comment.Single),
(r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)',
Comment.Multiline),
(r'[\[\]{}(),;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
(r'[$@][a-zA-Z_]\w*', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInMacros'),
(r'"', String, combined('stringescape', 'dqs')),
(r"'", String, 'sqs'),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
(r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(%s)\b' % '|'.join(functions),
Name.Function),
],
'builtInMacros': [
(r'(?i)(%s)\b' % '|'.join(macros),
Name.Variable.Global),
],
'labels': [
# sendkeys
(r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'sqs': [
(r'\'\'|\`([,%`abfnrtv])', String.Escape),
(r"'", String, '#pop'),
(r"[^'\n]+", String)
],
'garbage': [
(r'[^\S\n]', Text),
],
}
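# A hedged usage sketch (illustrative, assuming this is the AutoItLexer that
# Pygments registers): the keyword/function/macro lists above are joined into
# single case-insensitive alternations at class-creation time. Note that the
# generic [$@] variable rule in 'root' fires before the macro table, so
# '@UserName' comes out as Name.Variable rather than Name.Variable.Global.
if __name__ == '__main__':
    from pygments.lexers import AutoItLexer

    for tok, val in AutoItLexer().get_tokens("MsgBox(0, 'Hi', @UserName)"):
        print(tok, repr(val))  # 'MsgBox' should surface as Name.Function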

View file

@ -0,0 +1,103 @@
"""
pygments.lexers.bare
~~~~~~~~~~~~~~~~~~~~
Lexer for the BARE schema.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words, bygroups
from pygments.token import Text, Comment, Keyword, Name, Literal
__all__ = ['BareLexer']
class BareLexer(RegexLexer):
"""
For `BARE <https://baremessages.org>`_ schema source.
.. versionadded:: 2.7
"""
name = 'BARE'
filenames = ['*.bare']
aliases = ['bare']
flags = re.MULTILINE | re.UNICODE
keywords = [
'type',
'enum',
'u8',
'u16',
'u32',
'u64',
'uint',
'i8',
'i16',
'i32',
'i64',
'int',
'f32',
'f64',
'bool',
'void',
'data',
'string',
'optional',
'map',
]
tokens = {
'root': [
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)',
bygroups(Keyword, Text, Name.Class, Text), 'struct'),
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\()',
bygroups(Keyword, Text, Name.Class, Text), 'union'),
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)',
bygroups(Keyword, Text, Name, Text), 'typedef'),
(r'(enum)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)',
bygroups(Keyword, Text, Name.Class, Text), 'enum'),
(r'#.*?$', Comment),
(r'\s+', Text),
],
'struct': [
(r'\{', Text, '#push'),
(r'\}', Text, '#pop'),
(r'([a-zA-Z0-9]+)(:\s*)', bygroups(Name.Attribute, Text), 'typedef'),
(r'\s+', Text),
],
'union': [
(r'\)', Text, '#pop'),
(r'\s*\|\s*', Text),
(r'[A-Z][a-zA-Z0-9]+', Name.Class),
(words(keywords), Keyword),
(r'\s+', Text),
],
'typedef': [
(r'\[\]', Text),
(r'#.*?$', Comment, '#pop'),
(r'(\[)(\d+)(\])', bygroups(Text, Literal, Text)),
(r'<|>', Text),
(r'\(', Text, 'union'),
(r'(\[)([a-z][a-zA-Z0-9]+)(\])', bygroups(Text, Keyword, Text)),
(r'(\[)([A-Z][a-zA-Z0-9]+)(\])', bygroups(Text, Name.Class, Text)),
(r'([A-Z][a-zA-Z0-9]+)', Name.Class),
(words(keywords), Keyword),
(r'\n', Text, '#pop'),
(r'\{', Text, 'struct'),
(r'\s+', Text),
(r'\d+', Literal),
],
'enum': [
(r'\{', Text, '#push'),
(r'\}', Text, '#pop'),
(r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)', bygroups(Name.Attribute, Text, Literal)),
(r'[A-Z][A-Z0-9_]*', Name.Attribute),
(r'#.*?$', Comment),
(r'\s+', Text),
],
}
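# Illustrative sketch (assuming this BareLexer is registered with Pygments):
# 'root' matches a declaration keyword plus a name and pushes the matching
# sub-state ('struct', 'union', 'typedef', or 'enum'), which pops itself
# when the construct closes.
if __name__ == '__main__':
    from pygments.lexers import BareLexer

    schema = 'type Person {\n  name: string\n}\n'
    for tok, val in BareLexer().get_tokens(schema):
        print(tok, repr(val))  # 'Person' -> Name.Class, 'name' -> Name.Attribute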

Some files were not shown because too many files have changed in this diff.