1"""Diagnostic functions, mainly for use when doing tech support.""" 2 3# Use of this source code is governed by the MIT license. 4__license__ = "MIT" 5 6import cProfile 7from io import BytesIO 8from html.parser import HTMLParser 9import bs4 10from bs4 import BeautifulSoup, __version__ 11from bs4.builder import builder_registry 12 13import os 14import pstats 15import random 16import tempfile 17import time 18import traceback 19import sys 20import cProfile 21 22def diagnose(data): 23 """Diagnostic suite for isolating common problems. 24 25 :param data: A string containing markup that needs to be explained. 26 :return: None; diagnostics are printed to standard output. 27 """ 28 print(("Diagnostic running on Beautiful Soup %s" % __version__)) 29 print(("Python version %s" % sys.version)) 30 31 basic_parsers = ["html.parser", "html5lib", "lxml"] 32 for name in basic_parsers: 33 for builder in builder_registry.builders: 34 if name in builder.features: 35 break 36 else: 37 basic_parsers.remove(name) 38 print(( 39 "I noticed that %s is not installed. Installing it may help." % 40 name)) 41 42 if 'lxml' in basic_parsers: 43 basic_parsers.append("lxml-xml") 44 try: 45 from lxml import etree 46 print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)))) 47 except ImportError as e: 48 print( 49 "lxml is not installed or couldn't be imported.") 50 51 52 if 'html5lib' in basic_parsers: 53 try: 54 import html5lib 55 print(("Found html5lib version %s" % html5lib.__version__)) 56 except ImportError as e: 57 print( 58 "html5lib is not installed or couldn't be imported.") 59 60 if hasattr(data, 'read'): 61 data = data.read() 62 63 for parser in basic_parsers: 64 print(("Trying to parse your markup with %s" % parser)) 65 success = False 66 try: 67 soup = BeautifulSoup(data, features=parser) 68 success = True 69 except Exception as e: 70 print(("%s could not parse the markup." % parser)) 71 traceback.print_exc() 72 if success: 73 print(("Here's what %s did with the markup:" % parser)) 74 print((soup.prettify())) 75 76 print(("-" * 80)) 77 78def lxml_trace(data, html=True, **kwargs): 79 """Print out the lxml events that occur during parsing. 80 81 This lets you see how lxml parses a document when no Beautiful 82 Soup code is running. You can use this to determine whether 83 an lxml-specific problem is in Beautiful Soup's lxml tree builders 84 or in lxml itself. 85 86 :param data: Some markup. 87 :param html: If True, markup will be parsed with lxml's HTML parser. 88 if False, lxml's XML parser will be used. 89 """ 90 from lxml import etree 91 recover = kwargs.pop('recover', True) 92 if isinstance(data, str): 93 data = data.encode("utf8") 94 reader = BytesIO(data) 95 for event, element in etree.iterparse( 96 reader, html=html, recover=recover, **kwargs 97 ): 98 print(("%s, %4s, %s" % (event, element.tag, element.text))) 99 100class AnnouncingParser(HTMLParser): 101 """Subclass of HTMLParser that announces parse events, without doing 102 anything else. 103 104 You can use this to get a picture of how html.parser sees a given 105 document. The easiest way to do this is to call `htmlparser_trace`. 
106 """ 107 108 def _p(self, s): 109 print(s) 110 111 def handle_starttag(self, name, attrs): 112 self._p("%s START" % name) 113 114 def handle_endtag(self, name): 115 self._p("%s END" % name) 116 117 def handle_data(self, data): 118 self._p("%s DATA" % data) 119 120 def handle_charref(self, name): 121 self._p("%s CHARREF" % name) 122 123 def handle_entityref(self, name): 124 self._p("%s ENTITYREF" % name) 125 126 def handle_comment(self, data): 127 self._p("%s COMMENT" % data) 128 129 def handle_decl(self, data): 130 self._p("%s DECL" % data) 131 132 def unknown_decl(self, data): 133 self._p("%s UNKNOWN-DECL" % data) 134 135 def handle_pi(self, data): 136 self._p("%s PI" % data) 137 138def htmlparser_trace(data): 139 """Print out the HTMLParser events that occur during parsing. 140 141 This lets you see how HTMLParser parses a document when no 142 Beautiful Soup code is running. 143 144 :param data: Some markup. 145 """ 146 parser = AnnouncingParser() 147 parser.feed(data) 148 149_vowels = "aeiou" 150_consonants = "bcdfghjklmnpqrstvwxyz" 151 152def rword(length=5): 153 "Generate a random word-like string." 154 s = '' 155 for i in range(length): 156 if i % 2 == 0: 157 t = _consonants 158 else: 159 t = _vowels 160 s += random.choice(t) 161 return s 162 163def rsentence(length=4): 164 "Generate a random sentence-like string." 165 return " ".join(rword(random.randint(4,9)) for i in range(length)) 166 167def rdoc(num_elements=1000): 168 """Randomly generate an invalid HTML document.""" 169 tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] 170 elements = [] 171 for i in range(num_elements): 172 choice = random.randint(0,3) 173 if choice == 0: 174 # New tag. 175 tag_name = random.choice(tag_names) 176 elements.append("<%s>" % tag_name) 177 elif choice == 1: 178 elements.append(rsentence(random.randint(1,4))) 179 elif choice == 2: 180 # Close a tag. 181 tag_name = random.choice(tag_names) 182 elements.append("</%s>" % tag_name) 183 return "<html>" + "\n".join(elements) + "</html>" 184 185def benchmark_parsers(num_elements=100000): 186 """Very basic head-to-head performance benchmark.""" 187 print(("Comparative parser benchmark on Beautiful Soup %s" % __version__)) 188 data = rdoc(num_elements) 189 print(("Generated a large invalid HTML document (%d bytes)." % len(data))) 190 191 for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: 192 success = False 193 try: 194 a = time.time() 195 soup = BeautifulSoup(data, parser) 196 b = time.time() 197 success = True 198 except Exception as e: 199 print(("%s could not parse the markup." % parser)) 200 traceback.print_exc() 201 if success: 202 print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a))) 203 204 from lxml import etree 205 a = time.time() 206 etree.HTML(data) 207 b = time.time() 208 print(("Raw lxml parsed the markup in %.2fs." % (b-a))) 209 210 import html5lib 211 parser = html5lib.HTMLParser() 212 a = time.time() 213 parser.parse(data) 214 b = time.time() 215 print(("Raw html5lib parsed the markup in %.2fs." 
def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document."""
    # Keep a handle on the temporary file so it isn't deleted while
    # the profiler is writing to it.
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name

    data = rdoc(num_elements)
    vars = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)', vars, vars, filename)

    stats = pstats.Stats(filename)
    # stats.strip_dirs()
    stats.sort_stats("cumulative")
    # Only show entries whose paths mention bs4 or html5lib code.
    stats.print_stats('_html5lib|bs4', 50)

# If this file is run as a script, standard input is diagnosed.
if __name__ == '__main__':
    diagnose(sys.stdin.read())
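# Illustrative invocations (assuming this module is importable as
# bs4.diagnose, as its imports suggest):
#
#   python -m bs4.diagnose < suspect_page.html
#
# runs diagnose() on the contents of suspect_page.html, while
#
#   from bs4.diagnose import profile
#   profile(10000, parser="html.parser")
#
# profiles a parse of a smaller randomly generated document.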