
feedcake.py 8.4 KiB

# This script is intended for personal and scientific use only

import os
import sys
import re
import hashlib
import json
from time import sleep

import feedparser
import requests
from bs4 import BeautifulSoup, Comment
from bs4.element import CData
# default config location is a 'config.json' next to the script
try:
    filedir = os.path.dirname(os.path.abspath(__file__))
    if len(sys.argv) < 2:
        configpath = filedir + '/config.json'
        print("Using default config location: ", configpath)
        config = json.load(open(configpath))
    else:
        configpath = sys.argv[1]
        config = json.load(open(configpath))
except Exception:
    print("Problem reading config file: ", configpath)
    print("ERROR: Config file not found or invalid!")
    sys.exit(1)

print(filedir)

public_path = filedir + '/public'
assets_path = public_path + '/assets'
feeds_path = public_path + '/feeds'
# e.g. https://example.com/some-string
assets_url = config['assets_url']

# "I'm a robot which promises you clicks and $ ... Give me ALL your content!"
requestheaders = {
    'user-' + 'age' + 'nt':
        'Mo' + 'zill' + 'a/5.' + '0 (' + 'comp' + 'ati' + 'ble; ' + 'Go'
        + 'og' + 'le' + 'bo' + 't/' + '2.1; +http' + '://www.' + 'go'
        + 'og' + 'le' + '.com/' + 'bo' + 't.html)'
}
# need filename-safe strings for storing images along html files
def get_valid_filename(s):
    # drop query string, surrounding whitespace/slashes and the protocol prefix
    s = str(s).split('?')[0].strip().strip('/')
    s = re.sub(r'^https?://', '', s).replace(' ', '-')
    return re.sub(r'(?u)[^-\w.]', '-', s)

# Get a unique and valid filename from URL (for images)
def filename_from_url(url):
    # remove get attributes and path
    new_filename = url.split('?')[0].split('/')[-1]
    # Split filename
    new_filename = new_filename.split('.')
    # insert a hash before suffix
    new_filename.insert(1, str(hashlib.md5(url.encode('utf-8')).hexdigest()))
    # convert back to string and extra validate
    new_filename = get_valid_filename('.'.join(new_filename))
    return new_filename
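# For illustration (hypothetical URL): filename_from_url('https://example.com/img/photo.jpg')
# returns something like 'photo.<md5-of-url>.jpg', so the same remote image always
# maps to the same local asset name.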
# Download images and so on
def download_image(url, entry_dir, filename):
    # take care of protocol relative URLs ... let's just assume that https works.
    if url.startswith('//'):
        url = 'https:' + url
    response = requests.get(url, headers=requestheaders)
    if response.status_code == 200:
        with open(assets_path + '/' + entry_dir + '/' + filename, 'wb') as f:
            for chunk in response.iter_content(1024):
                f.write(chunk)
def process_feed(feed_url, output_filename):
    # Get the feed
    r_feed = requests.get(feed_url, headers=requestheaders)
    # TODO: exceptions (what if 404 or whatever?)

    # Store data of new articles
    for entry in feedparser.parse(r_feed.text).entries:
        print(entry.link)
        entry_dir = get_valid_filename(entry.link)  # input e.g. https://orf.at/stories/3117136/
        entry_path = assets_path + '/' + entry_dir
        if not os.path.exists(entry_path):
            r = requests.get(entry.link.split('?')[0], headers=requestheaders)
            online_soup = BeautifulSoup(r.text, 'html.parser')
            content_soup = BeautifulSoup('<div></div>', 'html.parser')

            # Remove all comments
            for element in online_soup(text=lambda text: isinstance(text, Comment)):
                element.extract()

            # domain and path specific rules
            # ... split strings for (very simple) ob+fu+sca+tion
            if entry.link.startswith('https://or' + 'f.a' + 't/sto' + 'ries'):
                if entry.get('date'):
                    article_time = content_soup.new_tag('time', datetime=entry.date)
                    content_soup.div.append(article_time)
                article_headline = online_soup.find('h1', attrs={'class': 'story-lead-headline'})
                content_soup.div.append(article_headline)
                article_body = online_soup.find('div', attrs={'class': 'story-content'})
                content_soup.div.append(article_body)
                article_link = content_soup.new_tag('a', href=entry.link)
                article_link['class'] = 'source'
                article_link.string = 'Quelle (' + entry.link + ')'
                content_soup.div.append(article_link)
            if entry.link.startswith('https://de' + 'rst' + 'and' + 'ard' + '.a' + 't/20'):  # url starts with number ... too lazy for regex :)
                if entry.get('published'):
                    article_time = content_soup.new_tag('time', datetime=entry.published)
                    content_soup.div.append(article_time)
                article_headline = online_soup.find('h1', attrs={'itemprop': 'headline'})
                content_soup.div.append(article_headline)
                # images etc
                article_aside = online_soup.find('div', id="content-aside")
                content_soup.div.append(article_aside)
                article_body = online_soup.find('div', attrs={'itemprop': 'articleBody'})
                content_soup.div.append(article_body)
                # modify original link -> mobile version and comment section
                link_to_comments = re.sub(r'(\/\/)', r'\1mobil.', entry.link.split('?')[0]) + '?_viewMode=forum#'
                article_comments_link = content_soup.new_tag('a', href=link_to_comments)
                article_comments_link['class'] = 'comments'
                article_comments_p = content_soup.new_tag('p')
                article_comments_link.string = 'Kommentare'
                article_comments_p.append(article_comments_link)
                content_soup.div.append(article_comments_p)
                article_link = content_soup.new_tag('a', href=entry.link.split('?')[0])
                article_link['class'] = 'source'
                article_link.string = 'Quelle: ' + entry.link.split('?')[0]
                content_soup.div.append(article_link)
            # create directory for storing and serving html and images
            os.makedirs(entry_path)

            # download all article images and replace image source
            for img in content_soup.findAll('img'):
                if img.get('data-src'):
                    old_url = img['data-src']
                    if not old_url.startswith('data:'):
                        new_filename = filename_from_url(old_url)
                        img['data-src'] = assets_url + '/' + entry_dir + '/' + new_filename
                        download_image(old_url, entry_dir, new_filename)
                if img.get('src'):
                    old_url = img['src']
                    if not old_url.startswith('data:'):
                        new_filename = filename_from_url(old_url)
                        img['src'] = assets_url + '/' + entry_dir + '/' + new_filename
                        download_image(old_url, entry_dir, new_filename)
                if img.get('data-srcset'):
                    srcset = img['data-srcset'].split(', ')
                    new_srcset = []
                    for src in srcset:
                        old_url = src.split(' ')[0]
                        src_res = src.split(' ')[1]
                        new_filename = filename_from_url(old_url)
                        download_image(old_url, entry_dir, new_filename)
                        new_url = assets_url + '/' + entry_dir + '/' + new_filename
                        src = ' '.join([new_url, src_res])
                        new_srcset.append(src)
                    img['data-srcset'] = ', '.join(new_srcset)
                # TODO(?): HTML5 picture tag

            f = open(entry_path + '/index.html', 'w')
            f.write(str(content_soup))
            f.close()
            sleep(1.3)
    # Create new feed
    # Maybe building a new feed from scratch using a template would be nicer but ...
    # let's just modify the original one!
    feed_soup = BeautifulSoup(r_feed.text, 'lxml-xml')
    for e in feed_soup.findAll('item'):
        entry_dir = get_valid_filename(e.link.text)
        f_content = open(assets_path + '/' + entry_dir + '/index.html', 'r')
        content_tag = feed_soup.new_tag('content:encoded')
        content_tag.string = CData(f_content.read())
        e.append(content_tag)
        f_content.close()

    # create directory if not present
    os.makedirs(feeds_path, exist_ok=True)
    f = open(feeds_path + '/' + output_filename, 'w')
    f.write(str(feed_soup.prettify()))
    f.close()
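# The rewritten feed keeps each original <item> and appends the cached article
# HTML, roughly like this (illustrative structure only, URL is a placeholder):
#
#   <item>
#     <title>...</title>
#     <link>https://example.com/some-article</link>
#     <content:encoded><![CDATA[<div>...cached article markup...</div>]]></content:encoded>
#   </item>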
# Let's actually fetch the stuff!
for feed in config['feeds']:
    process_feed(feed['source'], feed['destination'])
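
# A minimal config.json sketch (all values are hypothetical placeholders; the
# script only reads 'assets_url' plus a 'feeds' list of source/destination pairs):
#
#   {
#     "assets_url": "https://example.com/assets",
#     "feeds": [
#       {
#         "source": "https://example.com/rss.xml",
#         "destination": "examplefeed.xml"
#       }
#     ]
#   }
#
# Run it with the default config next to the script, or pass an explicit path:
#   python3 feedcake.py
#   python3 feedcake.py /path/to/config.json
# Cached articles and images land in public/assets/, rewritten feeds in public/feeds/.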