Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-fanficfare for 
openSUSE:Factory checked in at 2025-03-03 16:05:44
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.19136 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Mon Mar  3 16:05:44 2025 rev:65 rq:1249814 version:4.43.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes  2025-02-09 20:50:56.080606160 +0100
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.19136/python-fanficfare.changes  2025-03-03 16:05:54.077187851 +0100
@@ -1,0 +2,34 @@
+Sun Mar  2 22:28:58 UTC 2025 - Matej Cepl <mc...@cepl.eu>
+
+- Update to 4.43.0:
+  - base_xenforoforum: Add details_spoilers option for
+    [#1165](https://github.com/JimmXinu/FanFicFare/issues/1165)
+  - Add [base_otw] with use_basic_cache:true to defaults.ini
+  - BrowserCache: Better handle cache file changing/failing while
+    reading.
+  - Fix for BrowserCache for images--cache partitioned by
+    parent(story) page.
+  - base_otw_adapter: Detect & report 'This site is in beta' page
+  - AO3: Double default slow_down_sleep_time
+  - For adapter_mcstoriescom: Remove /../ from Get Story URLs
+    from web page
+  - adapter_mcstoriescom: Suppress site URLs that look like
+    stories but aren't. #1160
+  - adapter_fanfictionnet: Attempt chapter from m. (vs www) when
+    chapter not found
+  - adapter_literotica: Get more story urls. #1159 Thanks, dbhmw
+  - Add include_tocpage:always option.
+  - adapter_fimfictionnet: Further cover fix
+  - adapter_fimfictionnet: Fix cover images and use data-source
+    attr for img src.
+  - adapter_storiesonlinenet: Remove some code that broke parsing
+    when 'author' was in the title.
+  - adapter_literotica: http->https
+  - adapter_literotica: Site changes for non-www domains.
+  - Make plugin use own copy of six only--including in Smarten
+    Punc
+  - adapter_fictionlive: fix bounds check in vote accumulation.
+    resolves JimmXinu#1154, Thanks HazelSh
+  - Make plugin use own copy of six only.
+
+-------------------------------------------------------------------
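
For reference, the two user-visible options called out above can be set in a
user's personal.ini; a minimal sketch, with section names and values taken
from the defaults.ini changes quoted in the diff below:

    [defaults]
    ## 'always' emits the tocpage even for single-chapter stories.
    include_tocpage: always

    [base_xenforoforum]
    ## wrap forum spoiler blocks in <details>/<summary> tags.
    details_spoilers: true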

Old:
----
  FanFicFare-4.42.0.tar.gz

New:
----
  FanFicFare-4.43.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.WMIMQ4/_old  2025-03-03 16:05:54.733215296 +0100
+++ /var/tmp/diff_new_pack.WMIMQ4/_new  2025-03-03 16:05:54.737215463 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-fanficfare
 #
-# Copyright (c) 2024 SUSE LLC
+# Copyright (c) 2025 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %define skip_python2 1
 Name:           python-fanficfare
-Version:        4.42.0
+Version:        4.43.0
 Release:        0
 Summary:        Tool for making eBooks from stories on fanfiction and other web sites
 License:        GPL-3.0-only

++++++ FanFicFare-4.42.0.tar.gz -> FanFicFare-4.43.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/__init__.py new/FanFicFare-4.43.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.42.0/calibre-plugin/__init__.py    2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/__init__.py    2025-03-01 22:27:40.000000000 +0100
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 42, 0)
+__version__ = (4, 43, 0)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/common_utils.py new/FanFicFare-4.43.0/calibre-plugin/common_utils.py
--- old/FanFicFare-4.42.0/calibre-plugin/common_utils.py        2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/common_utils.py        2025-03-01 22:27:40.000000000 +0100
@@ -2,7 +2,6 @@
 
 from __future__ import (unicode_literals, division, absolute_import,
                         print_function)
-import six
 
 __license__   = 'GPL v3'
 __copyright__ = '2011, Grant Drake <grant.dr...@gmail.com>, 2018, Jim Miller'
@@ -22,7 +21,9 @@
 from calibre.gui2.keyboard import ShortcutConfig
 from calibre.utils.config import config_dir
 from calibre.utils.date import now, format_date, qt_to_dt, UNDEFINED_DATE
-from fanficfare.six import text_type as unicode
+
+import fanficfare.six as six
+from six import text_type as unicode
 
 # Global definition of our plugin name. Used for common functions that require this.
 plugin_name = None
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/config.py new/FanFicFare-4.43.0/calibre-plugin/config.py
--- old/FanFicFare-4.42.0/calibre-plugin/config.py      2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/config.py      2025-03-01 22:27:40.000000000 +0100
@@ -2,7 +2,6 @@
 
 from __future__ import (unicode_literals, division, absolute_import,
                         print_function)
-import six
 
 __license__   = 'GPL v3'
 __copyright__ = '2021, Jim Miller'
@@ -24,7 +23,8 @@
 from calibre.gui2 import dynamic, info_dialog
 from calibre.gui2.complete2 import EditWithComplete
 from calibre.gui2.dialogs.confirm_delete import confirm
-from fanficfare.six import text_type as unicode
+import fanficfare.six as six
+from six import text_type as unicode
 
 try:
     from calibre.ebooks.covers import generate_cover as cal_generate_cover
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/fff_plugin.py new/FanFicFare-4.43.0/calibre-plugin/fff_plugin.py
--- old/FanFicFare-4.42.0/calibre-plugin/fff_plugin.py  2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/fff_plugin.py  2025-03-01 22:27:40.000000000 +0100
@@ -2,13 +2,12 @@
 
 from __future__ import (unicode_literals, division, absolute_import,
                         print_function)
-import six
-from six.moves import range
 
 __license__   = 'GPL v3'
 __copyright__ = '2021, Jim Miller'
 __docformat__ = 'restructuredtext en'
 
+import fanficfare.six as six
 from fanficfare.six import ensure_text, string_types, text_type as unicode
 
 # import cProfile
@@ -1243,7 +1242,7 @@
         url = adapter.url
         ## three tries, that's enough if both user/pass & is_adult needed,
         ## or a couple tries of one or the other
-        for x in range(0,2):
+        for x in [0,1,2]:
             try:
                 adapter.getStoryMetadataOnly(get_cover=False)
             except exceptions.FailedToLogin as f:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/jobs.py new/FanFicFare-4.43.0/calibre-plugin/jobs.py
--- old/FanFicFare-4.42.0/calibre-plugin/jobs.py        2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/jobs.py        2025-03-01 22:27:40.000000000 +0100
@@ -2,7 +2,6 @@
 
 from __future__ import (unicode_literals, division, absolute_import,
                         print_function)
-import six
 
 __license__   = 'GPL v3'
 __copyright__ = '2020, Jim Miller, 2011, Grant Drake <grant.dr...@gmail.com>'
@@ -426,7 +425,7 @@
                 data = {'smarten_punctuation':True}
                 opts = ALL_OPTS.copy()
                 opts.update(data)
-                O = namedtuple('Options', ' '.join(six.iterkeys(ALL_OPTS)))
+                O = namedtuple('Options', ' '.join(ALL_OPTS.keys()))
                 opts = O(**opts)
 
                 log = Log(level=Log.DEBUG)
@@ -459,7 +458,8 @@
     if 'calibre_columns' in book:
         injectini = ['[injected]']
         extra_valid = []
-        for k, v in six.iteritems(book['calibre_columns']):
+        for k in book['calibre_columns'].keys():
+            v = book['calibre_columns'][k]
             story.setMetadata(k,v['val'])
             injectini.append('%s_label:%s'%(k,v['label']))
             extra_valid.append(k)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/calibre-plugin/plugin-defaults.ini new/FanFicFare-4.43.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.42.0/calibre-plugin/plugin-defaults.ini    2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/calibre-plugin/plugin-defaults.ini    2025-03-01 22:27:40.000000000 +0100
@@ -124,6 +124,10 @@
 
 ## include a TOC page before the story text
 include_tocpage: true
+## When set to 'true', tocpage is only included if there is more than
+## one chapter in the story.  If set to 'always', tocpage will be
+## included even if the story only has one chapter.
+#include_tocpage: always
 
 ## website encoding(s) In theory, each website reports the character
 ## encoding they use for each page.  In practice, some sites report it
@@ -718,6 +722,9 @@
 storynotes_label:Story Notes
 add_to_extra_titlepage_entries:,storynotes
 
+[base_otw]
+use_basic_cache:true
+
 [base_xenforoforum]
 use_basic_cache:true
 ## Some sites require login for some stories
@@ -900,8 +907,17 @@
 ## spoiler blocks with the original spoiler button text as a label
 ## using fieldset and legend HTML tags.  For a simple box, see the
 ## add_to_output_css example for [base_xenforoforum:epub] below.
+## remove_spoilers overrides legend_spoilers
 #legend_spoilers:false
 
+## This option if uncommented and set true, will change the tags
+## around spoiler blocks to a <details> tag with <summary> tag
+## containing the original spoiler button text.  For a simple line
+## box, see the add_to_output_css example for [base_xenforoforum:epub]
+## below.
+## remove_spoilers and legend_spoilers override details
+#details_spoilers:false
+
 ## True by built-in default, but only applied if using threadmarks for
 ## chapters and a 'reader' URL is found in the thread, 'reader mode'
 ## will reduce the number of pages fetched by roughly 10 to 1 for a
@@ -1032,13 +1048,19 @@
 
 [base_xenforoforum:epub]
 
-## See remove_spoilers above for more about 'spoilers'.  This example
+## See remove_spoilers/etc above for more about 'spoilers'.  This example
 ## shows how to put a simple line around spoiler blocks.  Uncomment
 ## all three lines, keep the leading space before .bbCodeSpoilerContainer.
 #add_to_keep_html_attrs:,style
 #add_to_output_css:
 # .bbCodeSpoilerContainer { border: 1px solid black; padding: 2px; }
 
+## This example shows how to put a simple line around
+## 'details_spoilers' blocks.  Uncomment both lines, keep the leading
+## space before .bbCodeSpoilerContainer.
+#add_to_output_css:
+# .bbCodeSpoilerContainer { border: 1px solid black; padding: 2px; }
+
 ## When reveal_invisible_text:true, you can style the class
 ## invisible_text as you like for forum "invisible text". See
 ## reveal_invisible_text above.  This is just one example.  Note that
@@ -1119,6 +1141,10 @@
 ## epub carries the TOC in metadata.
 ## mobi generated from epub by calibre will have a TOC at the end.
 include_tocpage: false
+## When set to 'true', tocpage is only included if there is more than
+## one chapter in the story.  If set to 'always', tocpage will be
+## included even if the story only has one chapter.
+#include_tocpage: always
 
 ## include a Update Log page before the story text.  If 'true', the
 ## log will be updated each time the epub is and all the metadata
@@ -1537,7 +1563,7 @@
 #password:yourpassword
 
 [archiveofourown.org]
-use_basic_cache:true
+## This is a OTW-archive site.
 ## Some sites require login (or login for some rated stories) The
 ## program can prompt you, or you can save it in config.  In
 ## commandline version, this should go in your personal.ini, not
@@ -1678,7 +1704,7 @@
 
 ## AO3 is blocking people more aggressively.  If you download fewer
 ## stories less often you can likely get by with reducing this sleep.
-slow_down_sleep_time:2
+slow_down_sleep_time:4
 
 ## AO3 allows users to archive stories they didn't write in certain
 ## cases.  These are indicated by showing a byline such as:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fanfictionnet.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fanfictionnet.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fanfictionnet.py  2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fanfictionnet.py  2025-03-01 22:27:40.000000000 +0100
@@ -395,6 +395,10 @@
         if "Please email this error message in full to <a href='mailto:"; in 
data:
             raise exceptions.FailedToDownload("Error downloading Chapter: %s!  
FanFiction.net Site Error!" % url)
 
+        if "FanFiction.Net Message Type 1" in data and "Chapter not found." in 
data:
+            logger.debug("Chapter not found, trying m.fanfiction.net instead")
+            data = 
self.get_request(url.replace("www.fanfiction.net","m.fanfiction.net"))
+
         soup = self.make_soup(data)
 
         ## remove inline ads -- only seen with flaresolverr
@@ -402,9 +406,30 @@
             adtag.decompose()
 
         div = soup.find('div', {'id' : 'storytextp'})
+        if not div:
+            ## m.ffnet version
+            div = soup.find('div', {'id' : 'storycontent'})
+            if div:
+                logger.debug("Using m.fanfiction.net version")
+                ## Make divs id/class match www version for benefit of
+                ## users with custom CSS.  Anyone keeping the in-line
+                ## style and align attrs can deal with it themselves.
+
+                ## from
+                ## <div class="storycontent nocopy" id="storycontent" 
style="padding:5px 10px 5px 10px;">'
+                ## to
+                ## <div role="main" aria-label="story content" 
class="storytextp" id="storytextp" align="center" style="padding: 0px 0.5em; 
user-select: none;">
+                ## <div class="storytext xcontrast_txt nocopy" id="storytext">
+                div['class']=['storytext','xcontrast_txt','nocopy']
+                div['id']='storytext'
+                div = div.wrap(soup.new_tag('div'))
+                div.insert(0,"\n")
+                div.append("\n")
+                div['class']='storytextp'
+                div['id']='storytextp'
 
         if None == div:
-            logger.debug('div id=storytextp not found.  data:%s'%data)
+            logger.debug('div id=storytextp (or id=storycontent) not found.  data:%s'%data)
             raise exceptions.FailedToDownload("Error downloading Chapter: %s!  Missing required element!" % url)
 
         return self.utf8FromSoup(url,div)
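
For illustration, the fallback above simply re-requests the chapter from the
mobile host (made-up story/chapter ids):

    url = "https://www.fanfiction.net/s/1234567/3/Some-Story"
    url.replace("www.fanfiction.net", "m.fanfiction.net")
    # -> "https://m.fanfiction.net/s/1234567/3/Some-Story"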
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fictionlive.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fictionlive.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fictionlive.py    2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fictionlive.py    2025-03-01 22:27:40.000000000 +0100
@@ -418,7 +418,7 @@
                     #   so let's just ignore non-int values here
                     if not isinstance(v, int):
                         continue
-                    if 0 <= v <= len(choices):
+                    if 0 <= v < len(choices):
                         output[v] += 1
             return output
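
The one-character change above fixes an off-by-one: output has len(choices)
buckets indexed 0..len(choices)-1, so the old '<=' admitted an index one past
the end.  A minimal sketch of the failure mode (made-up values, not from the
adapter):

    choices = ['a', 'b', 'c']     # 3 vote choices
    output = [0] * len(choices)   # valid buckets: 0, 1, 2
    v = 3                         # stray out-of-range vote index
    # old: 0 <= v <= len(choices) is True  -> output[3] += 1 raises IndexError
    # new: 0 <= v <  len(choices) is False -> the stray vote is skipped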
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fimfictionnet.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fimfictionnet.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_fimfictionnet.py  2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_fimfictionnet.py  2025-03-01 22:27:40.000000000 +0100
@@ -99,6 +99,15 @@
                                                                  params['username']))
                 raise exceptions.FailedToLogin(url,params['username'])
 
+    def make_soup(self,data):
+        soup = super(FimFictionNetSiteAdapter, self).make_soup(data)
+        for img in soup.find_all('img',{'class':'user_image'}):
+            ## FimF has started a 'camo' mechanism for images that
+            ## gets block by CF.  attr data-source is original source.
+            if img.has_attr('data-source'):
+                img['src'] = img['data-source']
+        return soup
+
     def doExtractChapterUrlsAndMetadata(self,get_cover=True):
 
         if self.is_adult or self.getConfig("is_adult"):
@@ -168,12 +177,12 @@
 
         # Cover image
         if get_cover:
-            storyImage = storyContentBox.find('img', {'class':'lazy-img'})
+            storyImage = soup.select_one('div.story_container__story_image img')
             if storyImage:
                 coverurl = storyImage['data-fullsize']
                 # try setting from data-fullsize, if fails, try using data-src
                 if self.setCoverImage(self.url,coverurl)[0] == "failedtoload":
-                    coverurl = storyImage['data-src']
+                    coverurl = storyImage['src']
                     self.setCoverImage(self.url,coverurl)
 
                 coverSource = storyImage.parent.find('a', {'class':'source'})
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_literotica.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_literotica.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_literotica.py     2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_literotica.py     2025-03-01 22:27:40.000000000 +0100
@@ -37,7 +37,7 @@
 
     def __init__(self, config, url):
         BaseSiteAdapter.__init__(self, config, url)
-        logger.debug("LiteroticaComAdapter:__init__ - url='%s'" % url)
+        #logger.debug("LiteroticaComAdapter:__init__ - url='%s'" % url)
 
         # Each adapter needs to have a unique site abbreviation.
         self.story.setMetadata('siteabbrev','litero')
@@ -53,7 +53,7 @@
         ## have been keeping the language when 'normalizing' to first
         ## chapter.
         url = re.sub(r"^(https?://)"+LANG_RE+r"(\.i)?",
-                     r"\1\2",
+                     r"https://\2";,
                      url)
         url = url.replace('/beta/','/') # to allow beta site URLs.
 
@@ -77,7 +77,7 @@
 
     @classmethod
     def getSiteExampleURLs(cls):
-        return "http://www.literotica.com/s/story-title 
https://www.literotica.com/series/se/9999999 
https://www.literotica.com/s/story-title 
https://www.literotica.com/i/image-or-comic-title 
https://www.literotica.com/p/poem-title 
http://portuguese.literotica.com/s/story-title 
http://german.literotica.com/s/story-title";
+        return "https://www.literotica.com/s/story-title 
https://www.literotica.com/series/se/9999999 
https://www.literotica.com/s/story-title 
https://www.literotica.com/i/image-or-comic-title 
https://www.literotica.com/p/poem-title 
https://portuguese.literotica.com/s/story-title 
https://german.literotica.com/s/story-title";
 
     def getSiteURLPattern(self):
         # also https://www.literotica.com/series/se/80075773
@@ -122,13 +122,22 @@
         if "This submission is awaiting moderator's approval" in data:
             raise exceptions.StoryDoesNotExist("This submission is awaiting moderator's approval. %s"%self.url)
 
+        ## 2025Feb - domains other than www now use different HTML.
+        ## Need to look for two different versions of basically
+        ## everything.
+
         ## not series URL, assumed to be a chapter.  Look for Story
         ## Info block of post-beta page.  I don't think it should happen?
         if '/series/se' not in self.url:
-            if not soup.select_one('div.page__aside'):
+            #logger.debug(data)
+            ## looking for /series/se URL to indicate this is a
+            ## chapter.
+            if not soup.select_one('div.page__aside') and not soup.select_one('div.sidebar'):
                 raise exceptions.FailedToDownload("Missing Story Info block, Beta turned off?")
 
             storyseriestag = soup.select_one('a.bn_av')
+            if not storyseriestag:
+                storyseriestag = soup.select_one('a[class^="_files__link_"]')
             # logger.debug("Story Series Tag:%s"%storyseriestag)
 
             if storyseriestag:
@@ -157,6 +166,8 @@
         ## Should change to /authors/ if/when it starts appearing.
         ## Assuming it's in the same place.
         authora = soup.find("a", class_="y_eU")
+        if not authora:
+            authora = soup.select_one('a[class^="_author__title"]')
         authorurl = authora['href']
         if authorurl.startswith('//'):
             authorurl = self.parsedUrl.scheme+':'+authorurl
@@ -171,17 +182,27 @@
         else: # if all else fails
             self.story.setMetadata('authorId', stripHTML(authora))
 
-        self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div#tabpanel-tags a.av_as') ])
+        if soup.select('div#tabpanel-tags'):
+            # logger.debug("tags1")
+            self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div#tabpanel-tags a.av_as') ])
+        if soup.select('div[class^="_widget__tags_"]'):
+            # logger.debug("tags2")
+            self.story.extendList('eroticatags', [ stripHTML(t).title() for t in soup.select('div[class^="_widget__tags_"] a[class^="_tags__link_"]') ])
+        # logger.debug(self.story.getList('eroticatags'))
 
         ## look first for 'Series Introduction', then Info panel short desc
         ## series can have either, so put in common code.
-        introtag = soup.select_one('div.bp_rh p')
+        introtag = soup.select_one('div.bp_rh')
         descdiv = soup.select_one('div#tabpanel-info div.bn_B')
+        if not descdiv:
+            descdiv = soup.select_one('div[class^="_tab__pane_"] div[class^="_widget__info_"]')
         if introtag and stripHTML(introtag):
             # make sure there's something in the tag.
+            # logger.debug("intro %s"%introtag)
             self.setDescription(self.url,introtag)
         elif descdiv and stripHTML(descdiv):
             # make sure there's something in the tag.
+            # logger.debug("desc %s"%descdiv)
             self.setDescription(self.url,descdiv)
         else:
             ## Only for backward compatibility with 'stories' that
@@ -212,7 +233,10 @@
             self.story.setMetadata('status','Completed')
 
             # Add the category from the breadcumb.
-            self.story.addToList('category', soup.find('div', id='BreadCrumbComponent').findAll('a')[1].string)
+            breadcrumbs = soup.find('div', id='BreadCrumbComponent')
+            if not breadcrumbs:
+                breadcrumbs = soup.select_one('ul[class^="_breadcrumbs_list_"]')
+            self.story.addToList('category', breadcrumbs.findAll('a')[1].string)
 
             ## one-shot chapter
             self.add_chapter(self.story.getMetadata('title'), self.url)
@@ -328,14 +352,13 @@
         return
 
     def getPageText(self, raw_page, url):
-        # logger.debug('Getting page text')
-#         logger.debug(soup)
+        logger.debug('Getting page text')
         raw_page = raw_page.replace('<div class="b-story-body-x x-r15"><div><p>','<div class="b-story-body-x x-r15"><div>')
-#         logger.debug("\tChapter text: %s" % raw_page)
+        # logger.debug("\tChapter text: %s" % raw_page)
         page_soup = self.make_soup(raw_page)
         [comment.extract() for comment in page_soup.findAll(string=lambda text:isinstance(text, Comment))]
         fullhtml = ""
-        for aa_ht_div in page_soup.find_all('div', 'aa_ht'):
+        for aa_ht_div in page_soup.find_all('div', 'aa_ht') + page_soup.select('div[class^="_article__content_"]'):
             if aa_ht_div.div:
                 html = unicode(aa_ht_div.div)
                 # Strip some starting and ending tags,
@@ -353,6 +376,9 @@
         raw_page = self.get_request(url)
         page_soup = self.make_soup(raw_page)
         pages = page_soup.find('div',class_='l_bH')
+        if not pages:
+            pages = page_soup.select_one('div._pagination_h0sum_1')
+        # logger.debug(pages)
 
         fullhtml = ""
         chapter_description = ''
@@ -365,7 +391,10 @@
             ## look for highest numbered page, they're not all listed
             ## when there are many.
 
-            last_page_link = pages.find_all('a', class_='l_bJ')[-1]
+            last_page_links = pages.find_all('a', class_='l_bJ')
+            if not last_page_links:
+                last_page_links = pages.select('a[class^="_pagination__item_"]')
+            last_page_link = last_page_links[-1]
             last_page_no = int(urlparse.parse_qs(last_page_link['href'].split('?')[1])['page'][0])
             # logger.debug(last_page_no)
             for page_no in range(2, last_page_no+1):
@@ -374,7 +403,7 @@
                 raw_page = self.get_request(page_url)
                 fullhtml += self.getPageText(raw_page, url)
 
-#         logger.debug(fullhtml)
+        #logger.debug(fullhtml)
         page_soup = self.make_soup(fullhtml)
         fullhtml = self.utf8FromSoup(url, self.make_soup(fullhtml))
         fullhtml = chapter_description + fullhtml
@@ -382,6 +411,94 @@
 
         return fullhtml
 
+    def get_urls_from_page(self,url,normalize):
+        from ..geturls import get_urls_from_html
+
+        ## hook for logins, etc.
+        self.before_get_urls_from_page(url,normalize)
+
+        # this way it uses User-Agent or other special settings.
+        data = self.get_request(url,usecache=False)
+
+        page_urls = get_urls_from_html(self.make_soup(data), url, configuration=self.configuration, normalize=normalize)
+
+        user_story_list = re.search(r'literotica\.com/authors/.+?/lists\?listid=(?P<list_id>\d+)', url)
+        fav_authors = re.search(r'literotica\.com/authors/.+?/favorites', url)
+        written = re.search(r'literotica.com/authors/.+?/works/', url)
+        # logger.debug((user_story_list, fav_authors, written))
+
+        # If the url is not supported
+        if not user_story_list and not fav_authors and not written:
+            return {'urllist':page_urls}
+
+        # Grabbing the main list where chapters are contained.
+        if user_story_list:
+            js_story_list = re.search(r'data:\$R\[\d+?\]=\{pages:\$R\[\d+?\]=\[\$R\[\d+?\]=\{(?P<pages>success:!\d,current_page:\d+,last_page:\d+,total:\d+,per_page:\d+)(,has_series:!\d)?,data:\$R\[\d+\]=\[\$R\[\d+\]=\{allow_vote(?P<data>.+)\}\],pageParams', data)
+            logger.debug('user_story_list ID [%s]'%user_story_list.group('list_id'))
+        else:
+            js_story_list = re.search(r'data:\$R\[\d+?\]=\{pages:\$R\[\d+?\]=\[\$R\[\d+?\]=\{(?P<pages>current_page:\d+,last_page:\d+,total:\d+,per_page:\d+)(,has_series:!\d)?,data:\$R\[\d+\]=\[\$R\[\d+\]=\{(?!aim)(?P<data>.+)\}\],pageParams', data)
+
+        # In case the regex becomes outdated
+        if not js_story_list:
+            return {'urllist':page_urls}
+
+        user = re.search(r'literotica\.com\/authors\/(.+?)\/', url)
+        # Extract the current (should be 1) and last page numbers from the js.
+        pages = re.search(r"current_page:(?P<current>\d+),last_page:(?P<last>\d+),total:\d+", js_story_list.group('pages'))
+        logger.debug("Pages %s/%s"%(int(pages.group('current')), int(pages.group('last'))))
+
+        urls = []
+        # Necessary to format a proper link as there were no visible data specifying what kind of link that should be.
+        cat_to_link = {'adult-comics': 'i', 'erotic-art': 'i', 'illustrated-poetry': 'p', 'erotic-audio-poetry': 'p', 'erotic-poetry': 'p', 'non-erotic-poetry': 'p'}
+        stroy_urltype = re.findall(r"category_info:\$R\[.*?type:\".+?\",pageUrl:\"(.+?)\"}.+?,type:\"(.+?)\",url:\"(.+?)\",", js_story_list.group('data'))
+        for i in range(len(stroy_urltype)):
+            urls.append('https://www.literotica.com/%s/%s'%(cat_to_link.get(stroy_urltype[i][0], 's'), stroy_urltype[i][2]))
+
+        # Removes the duplicates
+        seen = set()
+        urls = [x for x in (page_urls + urls) if not (x in seen or seen.add(x))]
+        logger.debug("Found [%s] stories so far."%len(urls))
+
+        # Sometimes the rest of the stories are buried in the js, so no fetching is necessary.
+        if int(pages.group('last')) == int(pages.group('current')):
+            return {'urllist': urls}
+
+        user = urlparse.quote(user.group(1))
+        logger.debug("User: [%s]"%user)
+
+        import json
+        last_page = int(pages.group('last'))
+        current_page = int(pages.group('current')) + 1
+        # Fetching the remaining urls from the api. Can't trust the page count the website gives; sometimes even the api returns an outdated number of pages.
+        while current_page <= last_page:
+            i = len(urls)
+            logger.debug("Pages %s/%s"%(current_page, int(last_page)))
+            if fav_authors:
+                jsn = self.get_request('https://literotica.com/api/3/users/{}/favorite/works?params=%7B%22page%22%3A{}%2C%22pageSize%22%3A50%2C%22type%22%3A%22{}%22%2C%22withSeriesDetails%22%3Atrue%7D'.format(user, current_page, stroy_urltype[0][1]))
+            elif user_story_list:
+                jsn = self.get_request('https://literotica.com/api/3/users/{}/list/{}?params=%7B%22page%22%3A{}%2C%22pageSize%22%3A50%2C%22withSeriesDetails%22%3Atrue%7D'.format(user, user_story_list.group('list_id'), current_page))
+            else:
+                jsn = self.get_request('https://literotica.com/api/3/users/{}/series_and_works?params=%7B%22page%22%3A{}%2C%22pageSize%22%3A50%2C%22sort%22%3A%22date%22%2C%22type%22%3A%22{}%22%2C%22listType%22%3A%22expanded%22%7D'.format(user, current_page, stroy_urltype[0][1]))
+
+            urls_data = json.loads(jsn)
+            last_page = urls_data["last_page"]
+            current_page = int(urls_data["current_page"]) + 1
+            for story in urls_data['data']:
+                if story['url']:
+                    urls.append('https://www.literotica.com/%s/%s'%(cat_to_link.get(story["category_info"]["pageUrl"], 's'), str(story['url'])))
+                    continue
+                # Series has no url specified and contains all of the story links belonging to the series
+                urls.append('https://www.literotica.com/series/se/%s'%str(story['id']))
+                for series_story in story['parts']:
+                    urls.append('https://www.literotica.com/%s/%s'%(cat_to_link.get(series_story["category_info"]["pageUrl"], 's'), str(series_story['url'])))
+            logger.debug("Found [%s] stories."%(len(urls) - i))
+
+        # Again removing duplicates.
+        seen = set()
+        urls = [x for x in urls if not (x in seen or seen.add(x))]
+
+        logger.debug("Found total of [%s] stories"%len(urls))
+        return {'urllist':urls}
 
 def getClass():
     return LiteroticaSiteAdapter
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_mcstoriescom.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_mcstoriescom.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_mcstoriescom.py   2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_mcstoriescom.py   2025-03-01 22:27:40.000000000 +0100
@@ -64,7 +64,9 @@
         return "https://mcstories.com/StoryTitle/ 
https://mcstories.com/StoryTitle/index.html 
https://mcstories.com/StoryTitle/StoryTitle1.html";
 
     def getSiteURLPattern(self):
-        return r"https?://(www\.)?mcstories\.com/([a-zA-Z0-9_-]+)/"
+        ## Note that this uses a regular expression *negative*
+        ## lookahead--story URLs *can't* have /Titles/ /Authors/ etc.
+        return r"https?://(www\.)?mcstories\.com(?!/(Titles|Authors|Tags|ReadersPicks)/)/[a-zA-Z0-9_-]+/"
 
     def extractChapterUrlsAndMetadata(self):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/adapter_storiesonlinenet.py new/FanFicFare-4.43.0/fanficfare/adapters/adapter_storiesonlinenet.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/adapter_storiesonlinenet.py       2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/adapter_storiesonlinenet.py       2025-03-01 22:27:40.000000000 +0100
@@ -210,11 +210,7 @@
         a = soup.find('h1')
         self.story.setMetadata('title',stripHTML(a))
 
-        # Find authorid and URL from... author url.  Sometimes in top,
-        # other times in footer.
-        authfrom = soup.find('div', {'id':'top-header'})
-        if authfrom is None or 'author' not in str(authfrom):
-            authfrom = soup.find('footer')
+        authfrom = soup.find('footer')
         alist = authfrom.findAll('a', {'rel' : 'author'})
         for a in alist:
             self.story.addToList('authorId',a['href'].split('/')[2])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/base_otw_adapter.py new/FanFicFare-4.43.0/fanficfare/adapters/base_otw_adapter.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/base_otw_adapter.py       2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/base_otw_adapter.py       2025-03-01 22:27:40.000000000 +0100
@@ -147,6 +147,9 @@
             # note that it's not *actually* a 503 code...
             raise exceptions.FailedToDownload('Site is currently unavailable.')
 
+        if 'This site is in beta. Things may break or crash without notice.' in data:
+            raise exceptions.FailedToDownload('Page failed to load, reported "This site is in beta".')
+
         meta = self.get_request(metaurl)
 
         if 'This work is part of an ongoing challenge and will be revealed soon!' in meta:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/adapters/base_xenforoforum_adapter.py new/FanFicFare-4.43.0/fanficfare/adapters/base_xenforoforum_adapter.py
--- old/FanFicFare-4.42.0/fanficfare/adapters/base_xenforoforum_adapter.py      2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/adapters/base_xenforoforum_adapter.py      2025-03-01 22:27:40.000000000 +0100
@@ -850,6 +850,16 @@
                 legend.string = stripHTML(div.button.span)
                 div.insert(0,legend)
                 div.button.extract()
+        elif self.getConfig('details_spoilers'):
+            for div in self.get_spoiler_tags(topsoup):
+                div.name='details'
+                # add copy of XF1 class name for convenience of
+                # existing output_css when XF2.
+                div['class'].append('bbCodeSpoilerContainer')
+                legend = topsoup.new_tag('summary')
+                legend.string = stripHTML(div.button.span)
+                div.insert(0,legend)
+                div.button.extract()
 
     def _do_utf8FromSoup(self,url,soup,fetch=None,allow_replace_br_with_p=True):
         if self.getConfig('reveal_invisible_text'):
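
Roughly, the new details_spoilers branch above turns a forum spoiler block
into native disclosure markup; a sketch of the output (the spoiler label is a
made-up example, and bbCodeSpoilerContainer is the XF1-compatibility class
the code appends):

    <details class="... bbCodeSpoilerContainer">
      <summary>Spoiler: label text</summary>
      ... spoiler contents ...
    </details>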
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/browsercache/__init__.py new/FanFicFare-4.43.0/fanficfare/browsercache/__init__.py
--- old/FanFicFare-4.42.0/fanficfare/browsercache/__init__.py   2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/browsercache/__init__.py   2025-03-01 22:27:40.000000000 +0100
@@ -31,11 +31,12 @@
     Class to read web browser cache
     This wrapper class contains the actual impl object.
     """
-    def __init__(self, getConfig_fn, getConfigList_fn):
+    def __init__(self, site, getConfig_fn, getConfigList_fn):
         """Constructor for BrowserCache"""
         # import of child classes have to be inside the def to avoid circular import error
         for browser_cache_class in [SimpleCache, BlockfileCache, FirefoxCache2]:
-            self.browser_cache_impl = browser_cache_class.new_browser_cache(getConfig_fn,
+            self.browser_cache_impl = browser_cache_class.new_browser_cache(site,
+                                                                            getConfig_fn,
                                                                             getConfigList_fn)
             if self.browser_cache_impl is not None:
                 break
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/browsercache/base_browsercache.py new/FanFicFare-4.43.0/fanficfare/browsercache/base_browsercache.py
--- old/FanFicFare-4.42.0/fanficfare/browsercache/base_browsercache.py  2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/browsercache/base_browsercache.py  2025-03-01 22:27:40.000000000 +0100
@@ -19,6 +19,7 @@
 import time, datetime
 import gzip
 import zlib
+import re
 try:
     # py3 only, calls C libraries. CLI
     import brotli
@@ -50,9 +51,10 @@
 class BaseBrowserCache(object):
     """Base class to read various formats of web browser cache file"""
 
-    def __init__(self, getConfig_fn, getConfigList_fn):
+    def __init__(self, site, getConfig_fn, getConfigList_fn):
         """Constructor for BaseBrowserCache"""
         ## only ever called by class method new_browser_cache()
+        self.site = site
         self.getConfig = getConfig_fn
         self.getConfigList = getConfigList_fn
 
@@ -65,11 +67,12 @@
             self.age_limit = float(age_limit) * 3600
 
     @classmethod
-    def new_browser_cache(cls, getConfig_fn, getConfigList_fn):
+    def new_browser_cache(cls, site, getConfig_fn, getConfigList_fn):
         """Return new instance of this BrowserCache class, or None if supplied 
directory not the correct cache type"""
         if 
cls.is_cache_dir(cls.expand_cache_dir(getConfig_fn(CACHE_DIR_CONFIG))):
             try:
-                return cls(getConfig_fn,
+                return cls(site,
+                           getConfig_fn,
                            getConfigList_fn)
             except BrowserCacheException:
                 return None
@@ -135,27 +138,36 @@
         """
         raise NotImplementedError()
 
-    def make_key_parts(self, url):
+    def make_key_parts(self, url, site=False):
         """
         Modern browser all also key their cache with the domain to
         reduce info leaking, but differently.  However, some parts
-        are common
+        are common.
+
+        Now returns a list of domains, one for the story URL site and
+        one for the URLs own domain.  Cache partitioning of images is
+        done based on the parent page (ie, the story site), but if
+        it's not found/expired/etc and called directly instead, then
+        it will be partitioned by the image URL instead.  This way we
+        have both.
         """
         parsedUrl = urlparse(url)
         scheme = parsedUrl.scheme
-        domain = parsedUrl.netloc
-        # logger.debug(domain)
+        domains = [self.site, parsedUrl.netloc]
+
 
-        # discard www. -- others likely needed to distinguish host
-        # from domain.  Something like tldextract ideally, but
-        # dependencies
-        # XXX forums?
-        domain = domain.replace('www.','')
+        ## only keep the first domain.TLD, more general than
+        ## discarding www.
+        domains = [ re.sub(r'.*?([^\.]+\.[^\.]+)$',r'\1',d) for d in domains ]
+        ## don't need both if they are the same.  Could use a set() to
+        ## dedup, but want to preserve order.
+        if domains[0] == domains[1]:
+            domains.pop()
 
         # discard any #anchor part
         url = url.split('#')[0]
 
-        return (scheme, domain, url) # URL still contains domain, params, etc
+        return (scheme, domains, url) # URL still contains domain, params, etc
 
     def make_redirect_url(self,location,origurl):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/browsercache/base_chromium.py new/FanFicFare-4.43.0/fanficfare/browsercache/base_chromium.py
--- old/FanFicFare-4.42.0/fanficfare/browsercache/base_chromium.py      2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/browsercache/base_chromium.py      2025-03-01 22:27:40.000000000 +0100
@@ -39,10 +39,9 @@
     # 1/0/_dk_chrome-extension://akiljllkbielkidmammnifcnibaigelm chrome-extension://akiljllkbielkidmammnifcnibaigelm https://www.fanfiction.net/s/11377932/2/Guilt
     # 1/0/_dk_chrome-extension://akiljllkbielkidmammnifcnibaigelm chrome-extension://akiljllkbielkidmammnifcnibaigelm https://www.fanfiction.net/s/14161667/10/That-Time-I-Was-Reincarnated-In-Brockton-Bay
     def make_keys(self,url):
-        (scheme, domain, url) = self.make_key_parts(url)
-        return [ '1/0/_dk_'+scheme+'://'+domain+' '+scheme+'://'+domain+' '+url,
-                 '1/0/_dk_chrome-extension://akiljllkbielkidmammnifcnibaigelm chrome-extension://akiljllkbielkidmammnifcnibaigelm '+url
-                 ]
+        (scheme, domains, url) = self.make_key_parts(url)
+        return [ '1/0/_dk_'+scheme+'://'+d+' '+scheme+'://'+d+' '+url for d in domains ] + \
+            [ '1/0/_dk_chrome-extension://akiljllkbielkidmammnifcnibaigelm chrome-extension://akiljllkbielkidmammnifcnibaigelm '+url ]
 
     def make_age(self,response_time):
         return int(response_time/1000000)-EPOCH_DIFFERENCE
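
Continuing the hypothetical image URL above, the chromium-style make_keys()
now probes one cache key per partition (key format per the comment lines
earlier in this file):

    1/0/_dk_https://fanfiction.net https://fanfiction.net https://cdn.somehost.com/cover.jpg
    1/0/_dk_https://somehost.com https://somehost.com https://cdn.somehost.com/cover.jpg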
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/browsercache/browsercache_firefox2.py new/FanFicFare-4.43.0/fanficfare/browsercache/browsercache_firefox2.py
--- old/FanFicFare-4.42.0/fanficfare/browsercache/browsercache_firefox2.py      2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/browsercache/browsercache_firefox2.py      2025-03-01 22:27:40.000000000 +0100
@@ -48,6 +48,7 @@
         self.utc_offset = datetime.datetime.now() - utcnow().replace(tzinfo=None)
 
         # self.scan_cache_keys()
+        # logger.debug("cache site:%s"%self.site)
         # 1/0
 
     def scan_cache_keys(self):
@@ -59,7 +60,7 @@
             if entry.stat().st_mtime > time.time() - 3600: # last hour only
                 with share_open(entry.path, "rb") as entry_file:
                     metadata = _read_entry_headers(entry_file)
-                    if '14055284' in metadata['key']:
+                    if 'Battle_of_Antarctica_9' in metadata['key']:
                         logger.debug("%s->%s"%(metadata['key'],metadata['key_hash']))
 
     @staticmethod
@@ -77,14 +78,12 @@
         return False
 
     def make_keys(self,url):
-        (scheme,domain, url) = self.make_key_parts(url)
+        (scheme, domains, url) = self.make_key_parts(url)
         ## WebToEpub appears to leave just
         ## ':'+url
         ## May 2024, WebToEpub now uses '~FETCH,:'
-        return [ 'O^partitionKey=%28'+scheme+'%2C'+domain+'%29,:'+url,
-                 ':'+url,
-                 '~FETCH,:'+url
-                 ]
+        return [ 'O^partitionKey=%28'+scheme+'%2C'+d+'%29,:'+url for d in domains ] + \
+            [ ':'+url, '~FETCH,:'+url ]
 
     def make_key_path(self,key):
         logger.debug(key)
@@ -97,6 +96,7 @@
     def get_data_key_impl(self, url, key):
         key_path = self.make_key_path(key)
         if os.path.isfile(key_path): # share_open()'s failure for non-existent is some win error.
+            logger.debug("found cache: %s"%key_path)
             with share_open(key_path, "rb") as entry_file:
                 metadata = _read_entry_headers(entry_file)
                 # import json
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/cli.py new/FanFicFare-4.43.0/fanficfare/cli.py
--- old/FanFicFare-4.42.0/fanficfare/cli.py     2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/cli.py     2025-03-01 22:27:40.000000000 +0100
@@ -28,7 +28,7 @@
 import os, sys, platform
 
 
-version="4.42.0"
+version="4.43.0"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/configurable.py new/FanFicFare-4.43.0/fanficfare/configurable.py
--- old/FanFicFare-4.42.0/fanficfare/configurable.py    2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/configurable.py    2025-03-01 22:27:40.000000000 +0100
@@ -190,7 +190,7 @@
 
     valdict = {'collect_series':(None,None,boollist),
                'include_titlepage':(None,None,boollist),
-               'include_tocpage':(None,None,boollist),
+               'include_tocpage':(None,None,boollist+['always']),
                'is_adult':(None,None,boollist),
                'keep_style_attr':(None,None,boollist),
                'keep_title_attr':(None,None,boollist),
@@ -296,6 +296,7 @@
                'author_avatar_cover':(base_xenforo_list,None,boollist),
                'remove_spoilers':(base_xenforo_list+['royalroad.com'],None,boollist),
                'legend_spoilers':(base_xenforo_list+['royalroad.com', 'fiction.live'],None,boollist),
+               'details_spoilers':(base_xenforo_list,None,boollist),
                'apocrypha_to_omake':(base_xenforo_list,None,boollist),
                'replace_failed_smilies_with_alt_text':(base_xenforo_list,None,boollist),
                'use_threadmark_wordcounts':(base_xenforo_list,None,boollist),
@@ -556,6 +557,7 @@
                  'reader_posts_per_page',
                  'remove_spoilers',
                  'legend_spoilers',
+                 'details_spoilers',
                  'apocrypha_to_omake',
                  'skip_threadmarks_categories',
                  'fix_relative_text_links',
@@ -614,7 +616,8 @@
 
     def __init__(self, sections, fileform, lightweight=False,
                  basic_cache=None, browser_cache=None):
-        site = sections[-1] # first section is site DN.
+        self.site = sections[-1] # first section is site DN.
+        logger.debug("config site:%s"%self.site)
         ConfigParser.__init__(self)
 
         self.fetcher = None # the network layer for getting pages the
@@ -637,12 +640,12 @@
         for section in sections[:-1]:
             self.addConfigSection(section)
 
-        if site.startswith("www."):
-            sitewith = site
-            sitewithout = site.replace("www.","")
+        if self.site.startswith("www."):
+            sitewith = self.site
+            sitewithout = self.site.replace("www.","")
         else:
-            sitewith = "www."+site
-            sitewithout = site
+            sitewith = "www."+self.site
+            sitewithout = self.site
 
         self.addConfigSection(sitewith)
         self.addConfigSection(sitewithout)
@@ -1088,7 +1091,8 @@
                     ## make a data list of decorators to re-apply if
                     ## there are many more.
                     if self.browser_cache is None:
-                        self.browser_cache = BrowserCache(self.getConfig,
+                        self.browser_cache = BrowserCache(self.site,
+                                                          self.getConfig,
                                                          self.getConfigList)
                    fetchers.BrowserCacheDecorator(self.browser_cache).decorate_fetcher(self.fetcher)
                 except Exception as e:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/defaults.ini new/FanFicFare-4.43.0/fanficfare/defaults.ini
--- old/FanFicFare-4.42.0/fanficfare/defaults.ini       2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/defaults.ini       2025-03-01 22:27:40.000000000 +0100
@@ -124,6 +124,10 @@
 
 ## include a TOC page before the story text
 include_tocpage: true
+## When set to 'true', tocpage is only included if there is more than
+## one chapter in the story.  If set to 'always', tocpage will be
+## included even if the story only has one chapter.
+#include_tocpage: always
 
 ## website encoding(s) In theory, each website reports the character
 ## encoding they use for each page.  In practice, some sites report it
@@ -719,6 +723,9 @@
 storynotes_label:Story Notes
 add_to_extra_titlepage_entries:,storynotes
 
+[base_otw]
+use_basic_cache:true
+
 [base_xenforoforum]
 use_basic_cache:true
 ## Some sites require login for some stories
@@ -892,8 +899,17 @@
 ## spoiler blocks with the original spoiler button text as a label
 ## using fieldset and legend HTML tags.  For a simple box, see the
 ## add_to_output_css example for [base_xenforoforum:epub] below.
+## remove_spoilers overrides legend_spoilers
 #legend_spoilers:false
 
+## This option if uncommented and set true, will change the tags
+## around spoiler blocks to a <details> tag with <summary> tag
+## containing the original spoiler button text.  For a simple line
+## box, see the add_to_output_css example for [base_xenforoforum:epub]
+## below.
+## remove_spoilers and legend_spoilers override details
+#details_spoilers:false
+
 ## True by built-in default, but only applied if using threadmarks for
 ## chapters and a 'reader' URL is found in the thread, 'reader mode'
 ## will reduce the number of pages fetched by roughly 10 to 1 for a
@@ -1024,13 +1040,19 @@
 
 [base_xenforoforum:epub]
 
-## See remove_spoilers above for more about 'spoilers'.  This example
+## See remove_spoilers/etc above for more about 'spoilers'.  This example
 ## shows how to put a simple line around spoiler blocks.  Uncomment
 ## all three lines, keep the leading space before .bbCodeSpoilerContainer.
 #add_to_keep_html_attrs:,style
 #add_to_output_css:
 # .bbCodeSpoilerContainer { border: 1px solid black; padding: 2px; }
 
+## This example shows how to put a simple line around
+## 'details_spoilers' blocks.  Uncomment both lines, keep the leading
+## space before .bbCodeSpoilerContainer.
+#add_to_output_css:
+# .bbCodeSpoilerContainer { border: 1px solid black; padding: 2px; }
+
 ## When reveal_invisible_text:true, you can style the class
 ## invisible_text as you like for forum "invisible text". See
 ## reveal_invisible_text above.  This is just one example.  Note that
@@ -1109,6 +1131,10 @@
 ## epub carries the TOC in metadata.
 ## mobi generated from epub by calibre will have a TOC at the end.
 include_tocpage: false
+## When set to 'true', tocpage is only included if there is more than
+## one chapter in the story.  If set to 'always', tocpage will be
+## included even if the story only has one chapter.
+#include_tocpage: always
 
 ## include a Update Log page before the story text.  If 'true', the
 ## log will be updated each time the epub is and all the metadata
@@ -1532,7 +1558,7 @@
 #password:yourpassword
 
 [archiveofourown.org]
-use_basic_cache:true
+## This is a OTW-archive site.
 ## Some sites require login (or login for some rated stories) The
 ## program can prompt you, or you can save it in config.  In
 ## commandline version, this should go in your personal.ini, not
@@ -1673,7 +1699,7 @@
 
 ## AO3 is blocking people more aggressively.  If you download fewer
 ## stories less often you can likely get by with reducing this sleep.
-slow_down_sleep_time:2
+slow_down_sleep_time:4
 
 ## AO3 allows users to archive stories they didn't write in certain
 ## cases.  These are indicated by showing a byline such as:
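
Per the comment above the slow_down_sleep_time change, users who download
fewer stories less often can dial the doubled default back down in their
personal.ini; a minimal sketch:

    [archiveofourown.org]
    slow_down_sleep_time:2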
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/fetchers/cache_browser.py new/FanFicFare-4.43.0/fanficfare/fetchers/cache_browser.py
--- old/FanFicFare-4.42.0/fanficfare/fetchers/cache_browser.py  2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/fetchers/cache_browser.py  2025-03-01 22:27:40.000000000 +0100
@@ -76,11 +76,18 @@
                     #     logger.debug("First time for (%s) extra 
sleep"%parsedUrl.netloc)
                     #     time.sleep(10)
                     fromcache=False
-                    read_try_sleeps = [2, 2, 4, 5, 6]
+                    read_try_sleeps = [2, 2, 4, 10, 20]
                     while not d and read_try_sleeps:
                         time.sleep(read_try_sleeps.pop(0))
                         logger.debug("Checking for cache...")
-                        d = self.cache.get_data(url)
+                        try:
+                            d = self.cache.get_data(url)
+                        except Exception as e:
+                            ## catch exception while retrying, but
+                            ## re-raise if out of retries.
+                            logger.debug("Exception reading cache after 
open_pages_in_browser %s"%e)
+                            if not read_try_sleeps:
+                                raise
                     # logger.debug(d)
                     open_tries -= 1
                    domain_open_tries[parsedUrl.netloc] = domain_open_tries.get(parsedUrl.netloc,0) + 1
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/geturls.py new/FanFicFare-4.43.0/fanficfare/geturls.py
--- old/FanFicFare-4.42.0/fanficfare/geturls.py 2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/geturls.py 2025-03-01 22:27:40.000000000 +0100
@@ -77,6 +77,7 @@
             href = form_url(url,a['href'])
             # logger.debug("1 urlhref:%s"%href)
             href = cleanup_url(href,configuration,foremail)
+            # logger.debug("1.5 urlhref:%s"%href)
             try:
                 # logger.debug("2 urlhref:%s"%href)
                 adapter = adapters.getAdapter(configuration,href)
@@ -180,6 +181,12 @@
             href = href.replace('&index=1','')
         except Exception as e:
             logger.warning("Skipping royalroad email URL %s, got HTTP error 
%s"%(href,e))
+    if '/../' in href:
+        ## For mcstories.com, see #1160 All my attempts to use
+        ## urljoin() got uncomfortably complex in the face of
+        ## javascript links and parameter URLs.  And normpath() will
+        ## give \ on windows.
+        href = re.sub(r'([^/]+/../)',r'',href)
     return href
 
 def get_urls_from_imap(srv,user,passwd,folder,markread=True,normalize_urls=False):
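
A worked example of that substitution (made-up story URL in the #1160 style);
each 'segment/../' pair collapses:

    import re
    href = 'https://mcstories.com/Authors/../SomeStory/index.html'
    href = re.sub(r'([^/]+/../)', r'', href)
    # -> 'https://mcstories.com/SomeStory/index.html'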
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/writers/base_writer.py new/FanFicFare-4.43.0/fanficfare/writers/base_writer.py
--- old/FanFicFare-4.42.0/fanficfare/writers/base_writer.py     2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/writers/base_writer.py     2025-03-01 22:27:40.000000000 +0100
@@ -69,6 +69,9 @@
     def _write(self, out, text):
         out.write(ensure_binary(text))
 
+    def includeToCPage(self):
+        return (self.getConfig("include_tocpage")=='always' or 
(self.story.getChapterCount() > 1 and self.getConfig("include_tocpage"))) and 
not self.metaonly
+
     def writeTitlePage(self, out, START, ENTRY, END, WIDE_ENTRY=None, NO_TITLE_ENTRY=None):
         """
         Write the title page, but only include entries that there's
@@ -139,7 +142,7 @@
         names as Story.metadata, but ENTRY should use index and chapter.
         """
         # Only do TOC if there's more than one chapter and it's configured.
-        if self.story.getChapterCount() > 1 and self.getConfig("include_tocpage") and not self.metaonly :
+        if self.includeToCPage():
             if self.hasConfig("tocpage_start"):
                 START = string.Template(self.getConfig("tocpage_start"))
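
The resulting behaviour of includeToCPage(), for reference (derived from the
boolean above):

    # include_tocpage   chapters   ToC page written?
    # true              1          no
    # true              2+         yes
    # always            1          yes
    # false             any        no   (metaonly always suppresses it)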
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/fanficfare/writers/writer_epub.py new/FanFicFare-4.43.0/fanficfare/writers/writer_epub.py
--- old/FanFicFare-4.42.0/fanficfare/writers/writer_epub.py     2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/fanficfare/writers/writer_epub.py     2025-03-01 22:27:40.000000000 +0100
@@ -597,7 +597,7 @@
         if self.getConfig("include_titlepage"):
             
items.append(("title_page","OEBPS/title_page.xhtml","application/xhtml+xml","Title
 Page"))
             itemrefs.append("title_page")
-        if self.story.getChapterCount() > 1 and 
self.getConfig("include_tocpage") and not self.metaonly :
+        if self.includeToCPage():
             
items.append(("toc_page","OEBPS/toc_page.xhtml","application/xhtml+xml","Table 
of Contents"))
             itemrefs.append("toc_page")
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-4.42.0/pyproject.toml new/FanFicFare-4.43.0/pyproject.toml
--- old/FanFicFare-4.42.0/pyproject.toml        2025-02-01 23:52:54.000000000 +0100
+++ new/FanFicFare-4.43.0/pyproject.toml        2025-03-01 22:27:40.000000000 +0100
@@ -16,7 +16,7 @@
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.42.0"
+version = "4.43.0"
 
 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field:

++++++ _scmsync.obsinfo ++++++
--- /var/tmp/diff_new_pack.WMIMQ4/_old  2025-03-03 16:05:55.049228516 +0100
+++ /var/tmp/diff_new_pack.WMIMQ4/_new  2025-03-03 16:05:55.053228683 +0100
@@ -1,5 +1,5 @@
-mtime: 1738966863
-commit: 5339f64ac28a016306f8de898d06408196639c93b92e706af47cfdb71a68a00f
-url: https://src.opensuse.org/mcepl/python-fanficfare.git
-revision: 5339f64ac28a016306f8de898d06408196639c93b92e706af47cfdb71a68a00f
+mtime: 1741006121
+commit: 0f0797162e04d686826c342b31f84e54c13aa6e714ef1b92e19b3e6e34a28bd9
+url: https://src.opensuse.org/pool/python-fanficfare.git
+revision: 0f0797162e04d686826c342b31f84e54c13aa6e714ef1b92e19b3e6e34a28bd9
 

++++++ build.specials.obscpio ++++++
diff: old/*: No such file or directory
diff: new/*: No such file or directory
