Compare commits

3 Commits: e5df532825 ... f63779b0b8

| Author | SHA1 | Date |
|---|---|---|
| | f63779b0b8 | |
| | 9ec5347a7f | |
| | 30471c96c3 | |
```diff
@@ -89,12 +89,6 @@ CONCURRENT_REQUESTS_PER_DOMAIN = 8
 
 # Set settings whose default value is deprecated to a future-proof value
 TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
-FEEDS = {
-    "items.json": {
-        "format": "json",
-        "encoding": "utf8"
-    }
-}
 FEED_EXPORT_ENCODING = "utf-8"
 
 LOG_ENABLED = False
```
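The removed FEEDS block configured the JSON feed export directly in the Scrapy settings module; the diff does not show where (or whether) that export is reconfigured. One way to keep the same output is to pass it per run, either on the command line (`scrapy crawl <spider> -o items.json`) or from a Python driver. A minimal sketch of the latter, where the HeatSpider import path is a guess and not taken from this diff:

```python
from scrapy.crawler import CrawlerProcess

from dagens.spiders.heat import HeatSpider  # import path assumed

# Hypothetical driver: supplies the removed FEEDS mapping per run
# instead of hard-coding it in settings.py.
process = CrawlerProcess(settings={
    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    "FEED_EXPORT_ENCODING": "utf-8",
    "FEEDS": {"items.json": {"format": "json", "encoding": "utf8"}},
})
process.crawl(HeatSpider)
process.start()
```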
```diff
@@ -21,15 +21,29 @@ class HeatSpider(scrapy.Spider):
 
         for idx in range(1, 8):
             if not tree.find(f".//mandagratt{idx}rubrik") is None:
-                self.monday.append({"rubrik": tree.find(f".//mandagratt{idx}rubrik").text, "text": tree.find(f".//mandagratt{idx}text").text})
+                rubrik = tree.find(f".//mandagratt{idx}rubrik").text
+                text = tree.find(f".//mandagratt{idx}text").text
+                self.monday.append(f"{rubrik} {text}".strip())
+
             if not tree.find(f".//tisdagratt{idx}rubrik") is None:
-                self.tuesday.append({"rubrik": tree.find(f".//tisdagratt{idx}rubrik").text, "text": tree.find(f".//tisdagratt{idx}text").text})
+                rubrik = tree.find(f".//tisdagratt{idx}rubrik").text
+                text = tree.find(f".//tisdagratt{idx}text").text
+                self.tuesday.append(f"{rubrik} {text}".strip())
+
             if not tree.find(f".//onsdagratt{idx}rubrik") is None:
-                self.wednesday.append({"rubrik": tree.find(f".//onsdagratt{idx}rubrik").text, "text": tree.find(f".//onsdagratt{idx}text").text})
+                rubrik = tree.find(f".//onsdagratt{idx}rubrik").text
+                text = tree.find(f".//onsdagratt{idx}text").text
+                self.wednesday.append(f"{rubrik} {text}".strip())
+
             if not tree.find(f".//torsdagratt{idx}rubrik") is None:
-                self.thursday.append({"rubrik": tree.find(f".//torsdagratt{idx}rubrik").text, "text": tree.find(f".//torsdagratt{idx}text").text})
+                rubrik = tree.find(f".//torsdagratt{idx}rubrik").text
+                text = tree.find(f".//torsdagratt{idx}text").text
+                self.thursday.append(f"{rubrik} {text}".strip())
+
             if not tree.find(f".//fredagratt{idx}rubrik") is None:
-                self.friday.append({"rubrik": tree.find(f".//fredagratt{idx}rubrik").text, "text": tree.find(f".//fredagratt{idx}text").text})
+                rubrik = tree.find(f".//fredagratt{idx}rubrik").text
+                text = tree.find(f".//fredagratt{idx}text").text
+                self.friday.append(f"{rubrik} {text}".strip())
 
         dagens["week"] = self.week
        dagens["monday"] = self.monday
```
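This hunk swaps the per-day dicts for plain "rubrik text" strings. Since the five branches differ only in the Swedish tag prefix (mandag, tisdag, onsdag, torsdag, fredag), they could also be collapsed into one table-driven loop. A sketch, not part of the PR: the helper name, the DAY_PREFIXES mapping, and the None-guard on the text element are my additions.

```python
from xml.etree import ElementTree

# Hypothetical helper collapsing the five near-identical day branches.
DAY_PREFIXES = {
    "monday": "mandag",
    "tuesday": "tisdag",
    "wednesday": "onsdag",
    "thursday": "torsdag",
    "friday": "fredag",
}

def extract_dishes(tree: ElementTree.Element) -> dict:
    """Collect up to 7 dishes per weekday from the castit.nu XML."""
    menu = {day: [] for day in DAY_PREFIXES}
    for idx in range(1, 8):
        for day, prefix in DAY_PREFIXES.items():
            rubrik = tree.find(f".//{prefix}ratt{idx}rubrik")
            if rubrik is not None:
                text = tree.find(f".//{prefix}ratt{idx}text")
                dish = f"{rubrik.text} {text.text if text is not None else ''}"
                menu[day].append(dish.strip())
    return menu
```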
```diff
@@ -44,7 +58,6 @@ class HeatSpider(scrapy.Spider):
         url = ""
         dagens = DagensLunchHeat()
         dagens["place"] = "Heat Kopparlunden"
-        dagens["scraped_by"] = f"{self.__class__.__name__}"
         for js in scripts:
             if not "jQuery(function( $ ){" in js:
                 continue
```
```diff
@@ -54,6 +67,7 @@ class HeatSpider(scrapy.Spider):
                 if "url:\"https://castit.nu/xml" in line:
                     url = line[5:-2]
                     yield scrapy.Request(url, callback=self.extract_xml, meta={"dagens": dagens})
+        dagens["scraped_by"] = f"{self.__class__.__name__}"
         dagens["scraped_at"] = f"{datetime.now().isoformat()}"
 
         return dagens
```
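The two hunks above are a single move: `scraped_by` is now set next to `scraped_at`, after the requests have been yielded. The field still ends up on the exported item because `meta={"dagens": dagens}` passes a reference to the same object, and the assignments after the `yield` run as soon as Scrapy resumes the generator, before `extract_xml` fills in the menu. The trailing `return dagens` appears to be dead code, since a value returned from a generator callback is discarded during iteration; presumably the item Scrapy actually exports is the one `extract_xml` produces.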
```diff
@@ -57,6 +57,7 @@ if __name__ == "__main__":
     _heat = None
     _max = None
     res = dagens_lunch_results()
+    dir.mkdir(parents=True, exist_ok=True)
     with file.open("a") as f:
         f.write("[\n")
         for idx, item in enumerate(res):
```
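The added `mkdir` call makes the append below safe on a fresh checkout by creating the output directory before the file is opened. The definitions of `dir` and `file` are not shown in this view; a self-contained sketch under the assumption that both are `pathlib.Path` objects (names and paths are placeholders):

```python
from pathlib import Path

# Assumed definitions; the diff does not show them. Note that `dir`
# shadows the built-in of the same name.
dir = Path("output")
file = dir / "dagens.json"

dir.mkdir(parents=True, exist_ok=True)  # idempotent: no error if it already exists
with file.open("a") as f:
    f.write("[\n")
```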