Create class JSONfile

This commit is contained in:
vincent 2018-08-23 21:12:44 +02:00
parent c12341cd4b
commit 98e07cda07

View File

@@ -90,9 +90,8 @@ class emmisionGUI(Frame):
class Interface: class Interface:
def __init__(self): def __init__(self,data):
self.datafilepath=os.path.dirname(os.path.realpath(__file__))+"/chaine.json" self.data = data
self.data = load_jsonfile(self.datafilepath)
self.fenetre = Tk() self.fenetre = Tk()
self.fenetre.title("recherche de chaine") self.fenetre.title("recherche de chaine")
self.value = StringVar() self.value = StringVar()
@@ -131,11 +130,11 @@ class Interface:
try: try:
strlink=geturlprogrammetv(self.data[value]) strlink=geturlprogrammetv(self.data.get_chaine(value))
link= Labbelink(self.resultframe.interior,self.data[value],strlink) link= Labbelink(self.resultframe.interior,self.data.get_chaine(value),strlink)
link.pack() link.pack()
self.resultframe.update() self.resultframe.update()
print(self.data[value]) print(self.data.get_chaine(value))
emision=parse_emmission(strlink) emision=parse_emmission(strlink)
if emision: if emision:
if emision == "can't find show": if emision == "can't find show":
@@ -160,8 +159,7 @@ class Interface:
def click_update(self): def click_update(self):
parsechaine(self.datafilepath) self.data.parsechaine()
self.data = load_jsonfile(self.datafilepath)
labelupdate = Label(self.resultframe, text="update chaine done"+"\r") labelupdate = Label(self.resultframe, text="update chaine done"+"\r")
labelupdate.pack() labelupdate.pack()
@@ -169,16 +167,47 @@ class Interface:
webbrowser.open_new(link) webbrowser.open_new(link)
class JSONfile:
    """Channel-number -> channel-name mapping backed by a JSON file on disk.

    The mapping is loaded from *filename* (resolved next to this module).
    If the file does not exist yet, it is built by scraping the Wikipedia
    channel list (see parsechaine) and written to disk.
    """

    def __init__(self, filename):
        """Load the JSON data file, creating it on first run.

        filename: name of the JSON file, resolved relative to this module's
            directory.
        """
        # Imports kept function-local as in the original, but split onto
        # separate lines per convention.
        import json
        import os
        self.datafilepath = os.path.dirname(os.path.realpath(__file__)) + "/" + filename
        try:
            with open(self.datafilepath, 'r', encoding='utf-8') as f:
                self.data = json.load(f)
        except FileNotFoundError:
            # First run: no cached file yet, build it from the web.
            self.parsechaine()

    def get_chaine(self, number):
        """Return the channel name stored under *number*.

        Raises KeyError if the channel number is unknown.
        """
        return self.data[number]

    def parsechaine(self):
        """Scrape the Canal channel list from Wikipedia and persist it.

        Rebuilds self.data as {channel_number: channel_name} and writes it
        to self.datafilepath as JSON.

        NOTE(review): relies on module-level urllib.request, BeautifulSoup
        and RepresentsInt being available — confirm the file-top imports.
        """
        # Fix: json was only imported locally in __init__, so this method
        # would NameError without a module-level import; import it here too.
        import json
        URL = 'https://fr.wikipedia.org/wiki/Liste_des_cha%C3%AEnes_de_Canal'
        liste_chaine = {}
        response = urllib.request.urlopen(URL)
        html = response.read()
        parse = BeautifulSoup(html, "html.parser")
        for item in parse.find_all('table'):
            # Only the channel tables: plain or sortable wikitables.
            if item.get("class") in (['wikitable'], ['wikitable', 'sortable']):
                for tr in item.find_all('tr'):
                    firstTD = tr.find()
                    num = firstTD.text
                    #print(num)
                    if RepresentsInt(num):
                        if RepresentsInt(firstTD.find_next().string):
                            # Second cell is also numeric: the name is in the third cell.
                            #print(firstTD.find_next().find_next().text)
                            liste_chaine[int(num)] = firstTD.find_next().find_next().text
                        else:
                            #print(firstTD.find_next().string)
                            liste_chaine[int(num)] = firstTD.find_next().text
        print(json.dumps(liste_chaine, indent=4))
        self.data = liste_chaine
        with open(self.datafilepath, 'w', encoding='utf-8') as f:
            json.dump(liste_chaine, f, indent=4)

    def __repr__(self):
        return str(self.data)
def RepresentsInt(s): def RepresentsInt(s):
try: try:
@@ -189,32 +218,6 @@ def RepresentsInt(s):
except TypeError: except TypeError:
return False return False
def parsechaine(file):
URL = 'https://fr.wikipedia.org/wiki/Liste_des_cha%C3%AEnes_de_Canal'
liste_chaine = {}
response = urllib.request.urlopen(URL)
html = response.read()
parse = BeautifulSoup(html,"html.parser")
for item in parse.find_all('table'):
if (item.get("class") == ['wikitable'] or item.get("class") == ['wikitable', 'sortable']):
for tr in item.find_all('tr'):
firstTD = tr.find()
num = firstTD.text
#print(num)
if RepresentsInt(num):
if RepresentsInt(firstTD.find_next().string):
#print(firstTD.find_next().find_next().text)
liste_chaine[int(num)] = firstTD.find_next().find_next().text
else:
#print(firstTD.find_next().string)
liste_chaine[int(num)] = firstTD.find_next().text
print(json.dumps(liste_chaine, indent=4))
with open(file, 'w', encoding='utf-8') as f:
json.dump(liste_chaine, f, indent=4)
def geturlprogrammetv(strsearch): def geturlprogrammetv(strsearch):
strsearch=unicodedata.normalize('NFD', strsearch).encode('ascii', 'ignore') strsearch=unicodedata.normalize('NFD', strsearch).encode('ascii', 'ignore')
strsearch=strsearch.decode("utf-8") strsearch=strsearch.decode("utf-8")
@@ -241,7 +244,7 @@ def parse_emmission(URL):
casting=divcasting.find_all(href=re.compile("biographie")) casting=divcasting.find_all(href=re.compile("biographie"))
count=0 count=0
for actor in casting: for actor in casting:
casting[i]=actor.text casting[count]=actor.text
count+=1 count+=1
divsynopsis=parse.select_one(".episode-synopsis") divsynopsis=parse.select_one(".episode-synopsis")
img=divsynopsis.find_next('img')['data-src'] img=divsynopsis.find_next('img')['data-src']
@@ -259,9 +262,7 @@ def remove_first_space (string):
return string[space_number:] return string[space_number:]
def cli(num): def cli(num,data):
datafilepath=os.path.dirname(os.path.realpath(__file__))+"/chaine.json"
data = load_jsonfile(datafilepath)
print(num) print(num)
try: try:
print(data[num]) print(data[num])
@@ -283,11 +284,13 @@ def cli(num):
else: else:
print("pas de connection internet impossible de determiner l'émission du soir") print("pas de connection internet impossible de determiner l'émission du soir")
print("") print("")
def main():
    """Command-line entry point.

    With arguments: each argument is either "update" (refresh the cached
    channel list) or a channel number passed to cli(). With no arguments:
    launch the Tk interface.
    """
    data = JSONfile("chaine.json")
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg == "update":
                # Re-scrape and rewrite the channel list file.
                data.parsechaine()
            else:
                cli(arg, data.data)
    else:
        # No CLI arguments: start the GUI.
        Interface(data).mainloop()


# Fix: guard the entry point so importing this module does not trigger
# file/network access or open a Tk window as a side effect.
if __name__ == "__main__":
    main()