Current Dir: /opt/alt/python311/lib/python3.11/site-packages/markdown_it
Name              Size        Modified                Perms
__init__.py       113 bytes   05/08/2024 06:42:35 PM  rw-r--r--
__pycache__/      -           05/08/2024 06:42:35 PM  rwxr-xr-x
_compat.py        246 bytes   05/08/2024 06:42:35 PM  rw-r--r--
_punycode.py      2.31 KB     05/08/2024 06:42:35 PM  rw-r--r--
cli/              -           05/08/2024 06:42:35 PM  rwxr-xr-x
common/           -           05/08/2024 06:42:35 PM  rwxr-xr-x
helpers/          -           05/08/2024 06:42:35 PM  rwxr-xr-x
main.py           12.47 KB    05/08/2024 06:42:35 PM  rw-r--r--
parser_block.py   3.82 KB     05/08/2024 06:42:35 PM  rw-r--r--
parser_core.py    1010 bytes  05/08/2024 06:42:35 PM  rw-r--r--
parser_inline.py  4.88 KB     05/08/2024 06:42:35 PM  rw-r--r--
port.yaml         2.39 KB     05/08/2024 06:42:35 PM  rw-r--r--
presets/          -           05/08/2024 06:42:35 PM  rwxr-xr-x
py.typed          26 bytes    05/08/2024 06:42:35 PM  rw-r--r--
renderer.py       9.74 KB     05/08/2024 06:42:35 PM  rw-r--r--
ruler.py          8.98 KB     05/08/2024 06:42:35 PM  rw-r--r--
rules_block/      -           05/08/2024 06:42:35 PM  rwxr-xr-x
rules_core/       -           05/08/2024 06:42:35 PM  rwxr-xr-x
rules_inline/     -           05/08/2024 06:42:35 PM  rwxr-xr-x
token.py          6.29 KB     05/08/2024 06:42:35 PM  rw-r--r--
tree.py           11.15 KB    05/08/2024 06:42:35 PM  rw-r--r--
utils.py          5.24 KB     05/08/2024 06:42:35 PM  rw-r--r--
File: parser_block.py
"""Block-level tokenizer.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Callable from . import rules_block from .ruler import Ruler from .rules_block.state_block import StateBlock from .token import Token from .utils import EnvType if TYPE_CHECKING: from markdown_it import MarkdownIt LOGGER = logging.getLogger(__name__) RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool] """(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool) `silent` disables token generation, useful for lookahead. """ _rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [ # First 2 params - rule name & source. Secondary array - list of rules, # which can be terminated by this one. ("table", rules_block.table, ["paragraph", "reference"]), ("code", rules_block.code, []), ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]), ( "blockquote", rules_block.blockquote, ["paragraph", "reference", "blockquote", "list"], ), ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]), ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]), ("reference", rules_block.reference, []), ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]), ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]), ("lheading", rules_block.lheading, []), ("paragraph", rules_block.paragraph, []), ] class ParserBlock: """ ParserBlock#ruler -> Ruler [[Ruler]] instance. Keep configuration of block rules. """ def __init__(self) -> None: self.ruler = Ruler[RuleFuncBlockType]() for name, rule, alt in _rules: self.ruler.push(name, rule, {"alt": alt}) def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None: """Generate tokens for input range.""" rules = self.ruler.getRules("") line = startLine maxNesting = state.md.options.maxNesting hasEmptyLines = False while line < endLine: state.line = line = state.skipEmptyLines(line) if line >= endLine: break if state.sCount[line] < state.blkIndent: # Termination condition for nested calls. # Nested calls currently used for blockquotes & lists break if state.level >= maxNesting: # If nesting level exceeded - skip tail to the end. # That's not ordinary situation and we should not care about content. state.line = endLine break # Try all possible rules. # On success, rule should: # - update `state.line` # - update `state.tokens` # - return True for rule in rules: if rule(state, line, endLine, False): break # set state.tight if we had an empty line before current tag # i.e. latest empty line should not count state.tight = not hasEmptyLines line = state.line # paragraph might "eat" one newline after it in nested lists if (line - 1) < endLine and state.isEmpty(line - 1): hasEmptyLines = True if line < endLine and state.isEmpty(line): hasEmptyLines = True line += 1 state.line = line def parse( self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token] ) -> list[Token] | None: """Process input string and push block tokens into `outTokens`.""" if not src: return None state = StateBlock(src, md, env, outTokens) self.tokenize(state, state.line, state.lineMax) return state.tokens