#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

"""SQL Lexer"""
import re

# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
from io import TextIOBase

from sqlparse import tokens, keywords
from sqlparse.utils import consume


class Lexer:
    """The Lexer supports configurable syntax.
    To add support for additional keywords, use the `add_keywords` method."""

    _default_instance = None

    # Development notes:
    # - This class is prepared to be able to support additional SQL dialects
    #   in the future by adding additional functions that take the place of
    #   the function default_initialization().
    # - The lexer class uses explicit singleton behavior with the
    #   instance-getter method get_default_instance(). This mechanism has
    #   the advantage that the call signatures of the entry points to the
    #   sqlparse library are not affected. Also, usage of sqlparse in third
    #   party code does not need to be adapted. On the other hand, singleton
    #   behavior is not thread safe, and the current implementation does not
    #   easily allow for multiple SQL dialects to be parsed in the same
    #   process. Such behavior can be supported in the future by passing a
    #   suitably initialized lexer object as an additional parameter to the
    #   entry-point functions (such as `parse`). Code will need to be written
    #   to pass down and utilize such an object. The current implementation
    #   is prepared to support this thread-safe approach without the
    #   default_instance part needing to change its interface.

    @classmethod
    def get_default_instance(cls):
        """Returns the lexer instance used internally
        by the sqlparse core functions."""
        if cls._default_instance is None:
            cls._default_instance = cls()
            cls._default_instance.default_initialization()
        return cls._default_instance
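
    # Illustrative usage (a sketch, not part of the module): because the
    # instance is a singleton, repeated calls return the same shared object,
    # so any configuration change is visible to all users of the library.
    #
    #     lexer_a = Lexer.get_default_instance()
    #     lexer_b = Lexer.get_default_instance()
    #     assert lexer_a is lexer_b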

    def default_initialization(self):
        """Initialize the lexer with default dictionaries.
        Useful if you need to revert custom syntax settings."""
        self.clear()
        self.set_SQL_REGEX(keywords.SQL_REGEX)
        self.add_keywords(keywords.KEYWORDS_COMMON)
        self.add_keywords(keywords.KEYWORDS_ORACLE)
        self.add_keywords(keywords.KEYWORDS_PLPGSQL)
        self.add_keywords(keywords.KEYWORDS_HQL)
        self.add_keywords(keywords.KEYWORDS_MSACCESS)
        self.add_keywords(keywords.KEYWORDS)
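
    # Illustrative sketch: reverting to the stock configuration after
    # experimenting with custom syntax. 'FOO' is a made-up keyword used
    # only for this example.
    #
    #     lexer = Lexer.get_default_instance()
    #     lexer.add_keywords({'FOO': tokens.Keyword})
    #     lexer.default_initialization()  # back to the default dictionaries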

    def clear(self):
        """Clear all syntax configurations.
        Useful if you want to load a reduced set of syntax configurations.
        After this call, regexps and keyword dictionaries need to be loaded
        to make the lexer functional again."""
        self._SQL_REGEX = []
        self._keywords = []
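
    # Illustrative sketch of the reduced-configuration workflow described
    # above: after clear(), only the regexes and the common keywords are
    # loaded; the dialect-specific KEYWORDS_* dictionaries are left out.
    #
    #     lexer = Lexer.get_default_instance()
    #     lexer.clear()
    #     lexer.set_SQL_REGEX(keywords.SQL_REGEX)
    #     lexer.add_keywords(keywords.KEYWORDS_COMMON)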

    def set_SQL_REGEX(self, SQL_REGEX):
        """Set the list of regexes used to parse the SQL."""
        FLAGS = re.IGNORECASE | re.UNICODE
        self._SQL_REGEX = [
            (re.compile(rx, FLAGS).match, tt)
            for rx, tt in SQL_REGEX
        ]

    def add_keywords(self, keywords):
        """Add keyword dictionaries. Keywords are looked up in the same order
        that dictionaries were added."""
        self._keywords.append(keywords)
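
    # Illustrative sketch: registering a custom keyword dictionary. 'BAR'
    # is a hypothetical keyword; because dictionaries are searched in the
    # order they were added, the built-in dictionaries take precedence.
    #
    #     lexer = Lexer.get_default_instance()
    #     lexer.add_keywords({'BAR': tokens.Keyword})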

    def is_keyword(self, value):
        """Checks for a keyword.
        If the given value is in one of the KEYWORDS_* dictionaries
        it's considered a keyword. Otherwise, tokens.Name is returned.
        """
        val = value.upper()
        for kwdict in self._keywords:
            if val in kwdict:
                return kwdict[val], value
        return tokens.Name, value
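
    # Illustrative sketch: the lookup is case-insensitive and the original
    # value is returned alongside the token type (with the defaults loaded):
    #
    #     lexer = Lexer.get_default_instance()
    #     lexer.is_keyword('select')  # -> (tokens.Keyword.DML, 'select')
    #     lexer.is_keyword('foo')     # -> (tokens.Name, 'foo')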

    def get_tokens(self, text, encoding=None):
        """Return an iterable of (tokentype, value) pairs generated from
        ``text``.

        ``text`` may be a string or a file-like object. Bytes input is
        decoded with ``encoding`` if given; otherwise UTF-8 is tried first,
        falling back to ``unicode-escape``.
        """
        if isinstance(text, TextIOBase):
            text = text.read()

        if isinstance(text, str):
            pass
        elif isinstance(text, bytes):
            if encoding:
                text = text.decode(encoding)
            else:
                try:
                    text = text.decode('utf-8')
                except UnicodeDecodeError:
                    text = text.decode('unicode-escape')
        else:
            raise TypeError("Expected text or file-like object, got {!r}".
                            format(type(text)))

        iterable = enumerate(text)
        for pos, char in iterable:
            for rexmatch, action in self._SQL_REGEX:
                m = rexmatch(text, pos)

                if not m:
                    continue
                elif isinstance(action, tokens._TokenType):
                    yield action, m.group()
                elif action is keywords.PROCESS_AS_KEYWORD:
                    yield self.is_keyword(m.group())

                # Advance the enumerator past the rest of the match so the
                # outer loop resumes at the first unconsumed character.
                consume(iterable, m.end() - pos - 1)
                break
            else:
                yield tokens.Error, char
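
    # Illustrative sketch: get_tokens() is a generator, so statements can be
    # streamed pair by pair. With the default dictionaries loaded, this
    # prints roughly the following:
    #
    #     lexer = Lexer.get_default_instance()
    #     for ttype, value in lexer.get_tokens('select 1'):
    #         print(ttype, repr(value))
    #
    #     # Token.Keyword.DML 'select'
    #     # Token.Text.Whitespace ' '
    #     # Token.Literal.Number.Integer '1'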


def tokenize(sql, encoding=None):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer.get_default_instance().get_tokens(sql, encoding)
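
# Illustrative sketch: typical module-level usage. tokenize() simply
# delegates to the shared default Lexer instance.
#
#     from sqlparse import lexer
#     for ttype, value in lexer.tokenize('select * from foo'):
#         print(ttype, repr(value))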