Commit b0e36c9e authored by Godefroid Chapelle

Problem: double parsing of the index page is slow

Solution: remove the redundant parse and its assertion now that the remaining parsing appears to be correct
parent b6f77959
@@ -126,7 +126,6 @@ def patch_PackageIndex():
         except TypeError:
             plinks = parse_links(html_page)
         plinks = list(plinks)
-        pip_links = [l.url for l in plinks]
         # --- END OF LOCAL CHANGES ---
@@ -137,14 +136,6 @@ def patch_PackageIndex():
         # --- LOCAL CHANGES MADE HERE: ---
-        links = []
-        for match in HREF.finditer(page):
-            link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
-            links.append(_clean_link(link))
-        # TODO: remove assertion and double index page parsing before releasing.
-        assert set(pip_links) == set(links)
         for link in plinks:
             if _check_link_requires_python(link, PY_VERSION_INFO):
                 self.process_url(link.url)
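
For context, the removed lines performed a second, regex-based pass over the same index page purely as a cross-check against pip's parse_links(). The sketch below is a minimal, self-contained illustration of that pattern; the HREF pattern and the helper name regex_links are illustrative stand-ins, not the exact definitions used here (the original relied on setuptools' HREF, htmldecode, and pip's _clean_link).

import html
import re
from urllib.parse import urljoin

# Illustrative stand-in for setuptools' HREF regex; the real pattern differs.
HREF = re.compile(r'''href\s*=\s*['"]?([^'"> ]+)''', re.IGNORECASE)


def regex_links(page, base):
    """Second, regex-based pass over an index page: collect every href and
    resolve it against the page's base URL (the slow path removed here)."""
    links = []
    for match in HREF.finditer(page):
        # html.unescape stands in for htmldecode from the original code.
        links.append(urljoin(base, html.unescape(match.group(1))))
    return links


# The removed code cross-checked pip's parser against this second pass:
#     pip_links = [l.url for l in plinks]          # links from parse_links()
#     assert set(pip_links) == set(regex_links(page, base))
# Once the assertion had held long enough to trust parse_links(), the regex
# pass could be dropped.

Dropping the cross-check saves a full extra scan of every fetched index page, which is the "double parsing" the commit message refers to.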