@@ -190,7 +190,7 @@ for p in reversed(s.entries):
         shouldpost = False
         print("skip: already posted")

     # process only unprocessed tweets less than n days old
         shouldpost = True

     posttime = datetime(p.published_parsed.tm_year, p.published_parsed.tm_mon, p.published_parsed.tm_mday, p.published_parsed.tm_hour, p.published_parsed.tm_min, p.published_parsed.tm_sec)
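The posttime line above rebuilds a datetime field by field from feedparser's published_parsed, which is a time.struct_time. A minimal self-contained sketch of that conversion (the struct_time below is a stand-in, not data from the original script); since the first six fields of a struct_time are year through second, the whole call also collapses to one unpacking:

# Minimal sketch, not the original script: turning feedparser's
# published_parsed (a time.struct_time) into a datetime.
from datetime import datetime
import time

published_parsed = time.gmtime(0)  # stand-in for p.published_parsed

# Field-by-field, as in the diff above:
posttime = datetime(published_parsed.tm_year, published_parsed.tm_mon,
                    published_parsed.tm_mday, published_parsed.tm_hour,
                    published_parsed.tm_min, published_parsed.tm_sec)

# Equivalent shorthand: the first six struct_time fields are
# year, month, day, hour, minute, second.
assert posttime == datetime(*published_parsed[:6])
print(posttime)  # 1970-01-01 00:00:00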
@@ -246,7 +246,8 @@ for p in reversed(s.entries):
         else:
             print('Dryrun: not fetching ', pic, ' and not uploading it to mastodon')

     poster = p.title.split(']')[0].strip('[')
+    poster_text = "\n\nvia %s on soup.io" % poster
     # remove all html stuff - python module in use only supports markdown, not pure plaintext
     textsrc = h.handle(p.summary_detail.value.replace("<small>", "<br><small>"))
     # free text from lines without visible characters
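For context: judging by the h.handle() call, h is presumably an html2text.HTML2Text instance (its handle() method converts HTML to Markdown-flavoured plain text), and soup.io titles look like "[poster] caption". A small sketch of both steps, with made-up sample data:

# Sketch with hypothetical sample data; assumes h is an
# html2text.HTML2Text instance, as the h.handle() call suggests.
import html2text

h = html2text.HTML2Text()

title = "[someuser] a cat picture"        # hypothetical soup.io title
poster = title.split(']')[0].strip('[')   # -> "someuser"
poster_text = "\n\nvia %s on soup.io" % poster

summary = "<p>look at this</p><small>reposted from elsewhere</small>"
# force <small> onto its own line before converting, as the diff does
textsrc = h.handle(summary.replace("<small>", "<br><small>"))
print(poster_text)
print(textsrc)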
@@ -265,9 +266,12 @@ for p in reversed(s.entries):
     source = '\n\nSource: ' + p.link

     # shorten text if too long
-    maximumlegth = 500 - 1 - len(source) - 50 # 50 ... just in case (if they also count attachement url and so on)
+    maximumlegth = 500 - 1 - len(poster_text) - len(source) - 50 # 50 ... just in case (if they also count attachement url and so on)
     text = (text[:maximumlegth] + '…') if len(text) > maximumlegth else text

+    # add poster
+    text += poster_text
+
     # add source
     text += source
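The arithmetic behind maximumlegth (variable spelled as in the diff): Mastodon's default status limit is 500 characters, so the budget subtracts one character for the appended ellipsis, the length of the poster line, the length of the source line, and a 50-character safety margin. A standalone sketch with placeholder strings:

# Standalone sketch of the length budget; strings are placeholders,
# variable spelling kept from the diff.
poster_text = "\n\nvia someuser on soup.io"
source = "\n\nSource: https://example.soup.io/post/123"
text = "lorem ipsum " * 60  # deliberately longer than the budget

maximumlegth = 500 - 1 - len(poster_text) - len(source) - 50
text = (text[:maximumlegth] + '…') if len(text) > maximumlegth else text

text += poster_text
text += source
assert len(text) <= 500  # fits Mastodon's default limit with margin
print(len(text))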