# Setup (run these in a shell, not inside Python):
#   apt install python3-tk ghostscript
#   pip install "camelot-py[cv]"
#   pip install requests
#   pip install beautifulsoup4
import re
from urllib.parse import urljoin
import camelot
import pandas as pd
import requests
from bs4 import BeautifulSoup
def get_link(url, text):
    """Fetch *url* and return the absolute URL of the first <a> tag
    whose text matches the regular expression *text*.

    Raises requests.HTTPError on a bad HTTP status and ValueError when
    no matching anchor is found on the page.
    """
    # Explicit timeout: without one requests.get can block indefinitely.
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    soup = BeautifulSoup(r.content, "html.parser")
    # `string=` replaces the `text=` keyword, deprecated since bs4 4.4.
    tag = soup.find("a", string=re.compile(text))
    if tag is None:
        # Fail loudly instead of an opaque AttributeError on tag.get below.
        raise ValueError(f"no link matching {text!r} found at {url}")
    # urljoin resolves relative hrefs against the page URL.
    return urljoin(url, tag.get("href"))
def set_col(df, n=1):
    """Promote the first *n* rows of *df* to column labels.

    With n == 1 the first row becomes the header as-is; with n > 1 the
    first n rows are concatenated position-wise into combined header
    strings.  Returns the remaining rows with a fresh integer index.
    """
    if n > 1:
        header_rows = df.head(n).values
        # Column-wise join: one combined label per column.
        labels = ["".join(parts) for parts in zip(*header_rows)]
    else:
        labels = df.iloc[0]
    body = df.iloc[n:]
    return body.set_axis(labels, axis=1).reset_index(drop=True)
# Resolve the daily situation-report page, then the PDF linked as "Anhang 1".
url = get_link(
    "https://www.mhlw.go.jp/stf/seisakunitsuite/bunya/0000121431_00086.html",
    "^Über die aktuelle Situation einer neuen Coronavirus-Infektion und die Reaktion des Ministeriums für Gesundheit, Arbeit und Soziales",
)
link = get_link(url, "Anhang 1")

# Extract every table from the PDF; strip embedded newlines from cells.
tables = camelot.read_pdf(link, pages="all", split_text=True, strip_text="\n")
df1 = set_col(tables[0].df, 2)  # first table carries a two-row header
df2 = set_col(tables[1].df)
df = pd.concat([df1, df2], axis=1)

# Clean headers and prefecture names: drop whitespace and footnote marks
# such as "※1".  regex=True is mandatory since pandas 2.0, where
# str.replace defaults to literal matching and these regex patterns
# would otherwise silently match nothing.
df.columns = (
    df.columns.str.replace(r"\s", "", regex=True).str.replace(r"※\d", "", regex=True)
)
df["Name der Präfekturen"] = (
    df["Name der Präfekturen"]
    .str.replace(r"\s", "", regex=True)
    .str.replace(r"※\d", "", regex=True)
)

# Strip thousands separators, blank out "-" placeholders, and export.
df = df.apply(lambda col: col.str.replace(",", ""))
df.mask(df == "-", inplace=True)
# utf_8_sig adds a BOM so Excel opens the Japanese text correctly.
df.to_csv("corona.csv", encoding="utf_8_sig")
# (scraped article footer removed: "Recommended Posts")