
Source Code for Module lxml.html.html5parser

  1  """ 
  2  An interface to html5lib. 
  3  """ 
  4   
from html5lib import HTMLParser as _HTMLParser
from lxml import etree
from lxml.html import _contains_block_level_tag, XHTML_NAMESPACE
from lxml.html._html5builder import TreeBuilder

# python3 compatibility
try:
    _strings = basestring
except NameError:
    _strings = (bytes, str)
try:
    from urllib import urlopen
except ImportError:
    from urllib.request import urlopen


class HTMLParser(_HTMLParser):
    """An html5lib HTML parser with lxml as tree."""

    def __init__(self, strict=False):
        _HTMLParser.__init__(self, strict=strict, tree=TreeBuilder)


try:
    from html5lib import XHTMLParser as _XHTMLParser
except ImportError:
    pass
else:
    class XHTMLParser(_XHTMLParser):
        """An html5lib XHTML Parser with lxml as tree."""

        def __init__(self, strict=False):
            _XHTMLParser.__init__(self, strict=strict, tree=TreeBuilder)

    xhtml_parser = XHTMLParser()


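For illustration, a custom parser instance can be handed to the module's parsing functions through their parser argument; a minimal sketch, assuming html5lib is installed alongside lxml:

    from lxml.html import html5parser

    # HTMLParser(strict=True) would instead raise on any parse error
    parser = html5parser.HTMLParser()
    root = html5parser.document_fromstring('<!DOCTYPE html><p>Hi</p>', parser=parser)
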
def _find_tag(tree, tag):
    elem = tree.find(tag)
    if elem is not None:
        return elem
    return tree.find('{%s}%s' % (XHTML_NAMESPACE, tag))


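A rough sketch of why this helper exists: depending on the html5lib version, the parsed elements may or may not carry the XHTML namespace, and _find_tag matches either form:

    from lxml.html import html5parser
    from lxml.html.html5parser import _find_tag

    doc = html5parser.document_fromstring('<!DOCTYPE html><p>Hi</p>')
    head = _find_tag(doc, 'head')   # found whether its tag is 'head' or a namespaced 'head'
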
def document_fromstring(html, guess_charset=True, parser=None):
    """Parse a whole document from a string."""
    if not isinstance(html, _strings):
        raise TypeError('string required')

    if parser is None:
        parser = html_parser

    return parser.parse(html, useChardet=guess_charset).getroot()


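For illustration, a minimal call, assuming html5lib is installed (charset guessing via chardet only applies to byte-string input):

    from lxml.html import html5parser

    root = html5parser.document_fromstring('<!DOCTYPE html><p>Hi</p>')
    # root is the <html> element; html5lib supplies the missing <head> and <body>
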
def fragments_fromstring(html, no_leading_text=False,
                         guess_charset=False, parser=None):
    """Parses several HTML elements, returning a list of elements.

    The first item in the list may be a string. If no_leading_text is true,
    then it will be an error if there is leading text, and it will always be
    a list of only elements.

    If `guess_charset` is `True` and the text was not unicode but a
    bytestring, the `chardet` library will perform charset guessing on the
    string.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')

    if parser is None:
        parser = html_parser

    children = parser.parseFragment(html, 'div', useChardet=guess_charset)
    if children and isinstance(children[0], _strings):
        if no_leading_text:
            if children[0].strip():
                raise etree.ParserError('There is leading text: %r' %
                                        children[0])
            del children[0]
    return children


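For illustration, a sketch of the fragment interface (leading text, when allowed, comes back as the first list item):

    from lxml.html import html5parser

    parts = html5parser.fragments_fromstring('Hello <b>world</b>!')
    # parts[0] is the leading string 'Hello ', parts[1] is the <b> element
    # (the trailing '!' ends up in its .tail)

    elements = html5parser.fragments_fromstring('<b>world</b>', no_leading_text=True)
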
def fragment_fromstring(html, create_parent=False,
                        guess_charset=False, parser=None):
    """Parses a single HTML element; it is an error if there is more than
    one element, or if anything but whitespace precedes or follows the
    element.

    If create_parent is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')

    if create_parent:
        # create_parent may be a tag name; a bare True means a 'div' wrapper
        container = create_parent if isinstance(create_parent, _strings) else 'div'
        html = '<%s>%s</%s>' % (container, html, container)

    children = fragments_fromstring(html, True, guess_charset, parser)
    if not children:
        raise etree.ParserError('No elements found')
    if len(children) > 1:
        raise etree.ParserError('Multiple elements found')

    result = children[0]
    if result.tail and result.tail.strip():
        raise etree.ParserError('Element followed by text: %r' % result.tail)
    result.tail = None
    return result


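For illustration, the create_parent wrapper is the way to feed in content that has several top-level nodes:

    from lxml.html import html5parser

    p = html5parser.fragment_fromstring('<p>one paragraph</p>')
    div = html5parser.fragment_fromstring('one <b>two</b> three', create_parent='div')
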
def fromstring(html, guess_charset=True, parser=None):
    """Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing if it
    is a fragment or a document.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')
    doc = document_fromstring(html, parser=parser,
                              guess_charset=guess_charset)

    # document starts with doctype or <html>, full document!
    start = html[:50].lstrip().lower()
    if start.startswith('<html') or start.startswith('<!doctype'):
        return doc

    head = _find_tag(doc, 'head')

    # if the head is not empty we have a full document
    if len(head):
        return doc

    body = _find_tag(doc, 'body')

    # The body has just one element, so it was probably a single
    # element passed in
    if (len(body) == 1 and (not body.text or not body.text.strip())
            and (not body[-1].tail or not body[-1].tail.strip())):
        return body[0]

    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in. We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = 'div'
    else:
        body.tag = 'span'
    return body


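A sketch of the heuristic in practice; which shape comes back depends on the input, per the rules above:

    from lxml.html import html5parser

    el = html5parser.fromstring('<p>a single paragraph</p>')        # the <p> element itself
    doc = html5parser.fromstring('<!DOCTYPE html><p>Hi</p>')        # the <html> root element
    div = html5parser.fromstring('text <p>and</p> <p>tags</p>')     # a <div> wrapping the mixed content
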
def parse(filename_url_or_file, guess_charset=True, parser=None):
    """Parse a filename, URL, or file-like object into an HTML document
    tree. Note: this returns a tree, not an element. Use
    ``parse(...).getroot()`` to get the document root.
    """
    if parser is None:
        parser = html_parser
    if isinstance(filename_url_or_file, _strings):
        fp = urlopen(filename_url_or_file)
    else:
        fp = filename_url_or_file
    return parser.parse(fp, useChardet=guess_charset)


html_parser = HTMLParser()
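For illustration ('page.html' is a hypothetical local file; a filename or URL string would be fetched with urlopen instead):

    from lxml.html import html5parser

    tree = html5parser.parse(open('page.html', 'rb'))
    root = tree.getroot()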