diff --git a/README.md b/README.md
index 38de4c0..de84b7b 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,7 @@ news pages
 * edit `config.json`
 * copy the cron-example: `cp cron-example.sh cron.sh`.
 * edit `cron.sh`
+* make `cron.sh` executable: `chmod +x cron.sh`
 * add cronjob for `cron.sh`: `crontab -e`
 * `*/5 * * * * /absolute/path/to/cron.sh > /path/to/logfile 2>&1`
 * setup your webserver: the `base_url` must point to the `public` directory
diff --git a/feedcake.py b/feedcake.py
index 6a18945..e2c808a 100644
--- a/feedcake.py
+++ b/feedcake.py
@@ -118,7 +118,6 @@ def process_feed(feed_url, output_filename):
         content_soup.article.append(article_link)
         if entry.link.startswith('https://de'+'rst'+'and'+'ard'+'.a'+'t/20'): # url starts with number ... too lazy for regex :)
-            print(entry)
             if entry.published:
                 article_time = content_soup.new_tag('time', datetime=entry.published)
                 content_soup.article.append(article_time)
@@ -153,21 +152,18 @@ def process_feed(feed_url, output_filename):
             print(img)
             if img.get('data-src'):
                 old_url = img['data-src']
-                print(old_url)
                 if not old_url.startswith('data:'):
                     new_filename = filename_from_url(old_url)
                     img['data-src'] = base_url + '/' + entry_dir + '/' + new_filename
                     download_image(old_url, entry_dir, new_filename)
             if img.get('src'):
                 old_url = img['src']
-                print(old_url)
                 if not old_url.startswith('data:'):
                     new_filename = filename_from_url(old_url)
                     img['src'] = base_url + '/' + entry_dir + '/' + new_filename
                     download_image(old_url, entry_dir, new_filename)
             if img.get('data-srcset'):
                 srcset = img['data-srcset'].split(', ')
-                print(old_url)
                 new_srcset = []
                 for src in srcset:
                     old_url = src.split(' ')[0]