Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-fanficfare for 
openSUSE:Factory checked in at 2026-03-02 17:40:22
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and      /work/SRC/openSUSE:Factory/.python-fanficfare.new.29461 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Mon Mar  2 17:40:22 2026 rev:77 rq:1335720 version:4.55.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes      
2026-02-11 18:48:42.125010920 +0100
+++ 
/work/SRC/openSUSE:Factory/.python-fanficfare.new.29461/python-fanficfare.changes
   2026-03-02 17:40:30.367796089 +0100
@@ -1,0 +2,18 @@
+Mon Mar  2 07:35:06 UTC 2026 - Matej Cepl <[email protected]>
+
+- Update to the version 4.55.0:
+  - Epub Update: Skip missing chapter, image and css files
+    instead of failing.
+  - Plugin only: In Skip mode, don't do initial metadata fetch if
+    already matched in library. #1309
+  - Ignore CSS url() when it references ttf/otf/woff/woff2 font files
+  - Additional checks for svg images to reject--Calibre only.
+    Related to #1298
+  - Refactor metadata entry and settings name code a bit
+  - Remove Site: swi.org.ru. No DNS for the site.
+  - Make some metadata entries immutable
+  - adapter_adultfanfictionorg: Fixes for site changes #1305
+  - adapter_fimfictionnet/adapter_royalroadcom: Better handling
+    of cover image size fall back #1306
+
+-------------------------------------------------------------------

Old:
----
  FanFicFare-4.54.0.tar.gz

New:
----
  FanFicFare-4.55.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.3LdAAb/_old  2026-03-02 17:40:31.075825372 +0100
+++ /var/tmp/diff_new_pack.3LdAAb/_new  2026-03-02 17:40:31.079825537 +0100
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %define skip_python2 1
 Name:           python-fanficfare
-Version:        4.54.0
+Version:        4.55.0
 Release:        0
 Summary:        Tool for making eBooks from stories on fanfiction and other 
web sites
 License:        GPL-3.0-only

++++++ FanFicFare-4.54.0.tar.gz -> FanFicFare-4.55.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/calibre-plugin/__init__.py 
new/FanFicFare-4.55.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.54.0/calibre-plugin/__init__.py    2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/calibre-plugin/__init__.py    2026-03-01 
16:25:11.000000000 +0100
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase
 
 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 54, 0)
+__version__ = (4, 55, 0)
 
 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/calibre-plugin/fff_plugin.py 
new/FanFicFare-4.55.0/calibre-plugin/fff_plugin.py
--- old/FanFicFare-4.54.0/calibre-plugin/fff_plugin.py  2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/calibre-plugin/fff_plugin.py  2026-03-01 
16:25:11.000000000 +0100
@@ -1307,6 +1307,13 @@
         if self.reject_url(merge,book):
             return
 
+        ## Check existing for SKIP mode.  Again, redundant with below
+        ## for when story URL changes, but also kept here to avoid
+        ## network hit.
+        identicalbooks = self.do_id_search(url)
+        if collision == SKIP and identicalbooks:
+            raise NotGoingToDownload(_("Skipping duplicate 
story."),"list_remove.png")
+
         # Dialogs should prevent this case now.
         if collision in (UPDATE,UPDATEALWAYS) and fileform != 'epub':
             raise NotGoingToDownload(_("Cannot update non-epub format."))
@@ -2267,7 +2274,6 @@
         errorcol_label = self.get_custom_col_label(prefs['errorcol'])
         lastcheckedcol_label = 
self.get_custom_col_label(prefs['lastcheckedcol'])
 
-        columns = self.gui.library_view.model().custom_columns
         if good_list or prefs['mark'] or (bad_list and errorcol_label) or 
lastcheckedcol_label:
             LoopProgressDialog(self.gui,
                                good_list+bad_list,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/calibre-plugin/plugin-defaults.ini 
new/FanFicFare-4.55.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.54.0/calibre-plugin/plugin-defaults.ini    2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/calibre-plugin/plugin-defaults.ini    2026-03-01 
16:25:11.000000000 +0100
@@ -1604,13 +1604,6 @@
 disclaimer_label:Disclaimer
 extra_titlepage_entries:eroticatags,disclaimer
 
-## Some sites require login (or login for some rated stories) The
-## program can prompt you, or you can save it in config.  In
-## commandline version, this should go in your personal.ini, not
-## defaults.ini.
-#username:YourName
-#password:yourpassword
-
 [althistory.com]
 ## Note this is NOT the same as www.alternatehistory.com
 ## see [base_xenforoforum]
@@ -4433,9 +4426,6 @@
 extracharacters:Buffy, Spike
 extraships:Spike/Buffy
 
-[www.swi.org.ru]
-use_basic_cache:true
-
 [www.the-sietch.com]
 ## see [base_xenforoforum]
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/adapters/__init__.py 
new/FanFicFare-4.55.0/fanficfare/adapters/__init__.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/__init__.py       2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/__init__.py       2026-03-01 
16:25:11.000000000 +0100
@@ -120,7 +120,6 @@
 from . import adapter_wwwnovelallcom
 from . import adapter_hentaifoundrycom
 from . import adapter_mugglenetfanfictioncom
-from . import adapter_swiorgru
 from . import adapter_fanficsme
 from . import adapter_fanfictalkcom
 from . import adapter_scifistoriescom
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_adultfanfictionorg.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_adultfanfictionorg.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_adultfanfictionorg.py     
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_adultfanfictionorg.py     
2026-03-01 16:25:11.000000000 +0100
@@ -68,9 +68,7 @@
 
         # The date format will vary from site to site.
         # 
http://docs.python.org/library/datetime.html#strftime-strptime-behavior
-        self.dateformat = "%Y-%m-%d"
-
-
+        self.dateformat = "%B %d, %Y"
 
     ## Added because adult-fanfiction.org does send you to
     ## www.adult-fanfiction.org when you go to it and it also moves
@@ -139,91 +137,45 @@
     def getSiteURLPattern(self):
         return 
r'https?://(anime|anime2|bleach|books|buffy|cartoon|celeb|comics|ff|games|hp|inu|lotr|manga|movies|naruto|ne|original|tv|xmen|ygo|yuyu)\.adult-fanfiction\.org/story\.php\?no=\d+$'
 
-    ##This is not working right now, so I'm commenting it out, but leaving it 
for future testing
-    ## Login seems to be reasonably standard across eFiction sites.
-    #def needToLoginCheck(self, data):
-        ##This adapter will always require a login
-    #    return True
-
-#    <form name="login" method="post" action="">
-#      <div class="top">E-mail: <span id="sprytextfield1">
-#        <input name="email" type="text" id="email" size="20" maxlength="255" 
/>
-#        <span class="textfieldRequiredMsg">Email is required.</span><span 
class="textfieldInvalidFormatMsg">Invalid E-mail.</span></span></div>
-#      <div class="top">Password: <span id="sprytextfield2">
-#        <input name="pass1" type="password" id="pass1" size="20" 
maxlength="32" />
-#        <span class="textfieldRequiredMsg">password is required.</span><span 
class="textfieldMinCharsMsg">Minimum 8 characters8.</span><span 
class="textfieldMaxCharsMsg">Exceeded 32 characters.</span></span></div>
-#      <div class="top"><br /> <input name="loginsubmittop" type="hidden" 
id="loginsubmit" value="TRUE" />
-#        <input type="submit" value="Login" />
-#      </div>
-#    </form>
-
-
-    ##This is not working right now, so I'm commenting it out, but leaving it 
for future testing
-    #def performLogin(self, url, soup):
-    #    params = {}
-
-    #    if self.password:
-    #        params['email'] = self.username
-    #        params['pass1'] = self.password
-    #    else:
-    #        params['email'] = self.getConfig("username")
-    #        params['pass1'] = self.getConfig("password")
-    #    params['submit'] = 'Login'
-
-    #    # copy all hidden input tags to pick up appropriate tokens.
-    #    for tag in soup.find_all('input',{'type':'hidden'}):
-    #        params[tag['name']] = tag['value']
-
-    #    logger.debug("Will now login to URL {0} as {1} with password: 
{2}".format(url, params['email'],params['pass1']))
-
-    #    d = self.post_request(url, params, usecache=False)
-    #    d = self.post_request(url, params, usecache=False)
-    #    soup = self.make_soup(d)
-
-        #if not (soup.find('form', {'name' : 'login'}) == None):
-        #    logger.info("Failed to login to URL %s as %s" % (url, 
params['email']))
-        #    raise exceptions.FailedToLogin(url,params['email'])
-        #    return False
-        #else:
-    #    return True
-
     ## Getting the chapter list and the meta data, plus 'is adult' checking.
     def doExtractChapterUrlsAndMetadata(self, get_cover=True):
 
         ## You need to have your is_adult set to true to get this story
         if not (self.is_adult or self.getConfig("is_adult")):
             raise exceptions.AdultCheckRequired(self.url)
+        else:
+            d = 
self.post_request('https://www.adult-fanfiction.org/globals/ajax/age-verify.php',
 {"verify":"1"})
+            if "Age verified successfully" not in d:
+                raise exceptions.FailedToDownload("Failed to Verify Age: 
{0}".format(d))
 
         url = self.url
         logger.debug("URL: "+url)
 
         data = self.get_request(url)
+        # logger.debug(data)
 
         if "The dragons running the back end of the site can not seem to find 
the story you are looking for." in data:
             raise exceptions.StoryDoesNotExist("{0}.{1} says: The dragons 
running the back end of the site can not seem to find the story you are looking 
for.".format(self.zone, self.getBaseDomain()))
 
         soup = self.make_soup(data)
 
-        ##This is not working right now, so I'm commenting it out, but leaving 
it for future testing
-        #self.performLogin(url, soup)
-
-
         ## Title
         ## Some of the titles have a backslash on the story page, but not on 
the Author's page
         ## So I am removing it from the title, so it can be found on the 
Author's page further in the code.
         ## Also, some titles may have extra spaces '  ', and the search on the 
Author's page removes them,
         ## so I have to here as well. I used multiple replaces to make sure, 
since I did the same below.
-        a = soup.find('a', 
href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$"))
-        self.story.setMetadata('title',stripHTML(a).replace('\\','').replace(' 
 ',' ').replace('  ',' ').replace('  ',' ').strip())
-
-        # Find the chapters:
-        chapters = soup.find('ul',{'class':'dropdown-content'})
-        for i, chapter in enumerate(chapters.find_all('a')):
-            self.add_chapter(chapter,self.url+'&chapter='+unicode(i+1))
+        h1 = soup.find('h1')
+        # logger.debug("Title:%s"%h1)
+        
self.story.setMetadata('title',stripHTML(h1).replace('\\','').replace('  ',' 
').replace('  ',' ').replace('  ',' ').strip())
+
+        # Find the chapters from first list only
+        chapters = soup.select_one('select.chapter-select').select('option')
+        for chapter in chapters:
+            self.add_chapter(chapter,self.url+'&chapter='+chapter['value'])
 
 
         # Find authorid and URL from... author url.
-        a = soup.find('a', href=re.compile(r"profile.php\?no=\d+"))
+        a = soup.find('a', href=re.compile(r"profile.php\?id=\d+"))
         if a == None:
             # I know that the original author of fanficfare wants to always 
have metadata,
             # but I posit that if the story is there, even if we can't get the 
metadata from the
@@ -232,140 +184,56 @@
             
self.story.setMetadata('authorUrl','https://www.adult-fanfiction.org')
             self.story.setMetadata('author','Unknown')
             logger.warning('There was no author found for the story... 
Metadata will not be retreived.')
-            self.setDescription(url,'>>>>>>>>>> No Summary Given <<<<<<<<<<')
+            self.setDescription(url,'>>>>>>>>>> No Summary Given, Unknown 
Author <<<<<<<<<<')
         else:
             self.story.setMetadata('authorId',a['href'].split('=')[1])
             self.story.setMetadata('authorUrl',a['href'])
             self.story.setMetadata('author',stripHTML(a))
 
-            ##The story page does not give much Metadata, so we go to the 
Author's page
-
-            ##Get the first Author page to see if there are multiple pages.
-            ##AFF doesn't care if the page number is larger than the actual 
pages,
-            ##it will continue to show the last page even if the variable is 
larger than the actual page
-            author_Url = 
'{0}&view=story&zone={1}&page=1'.format(self.story.getMetadata('authorUrl'), 
self.zone)
-            #author_Url = 
self.story.getMetadata('authorUrl')+'&view=story&zone='+self.zone+'&page=1'
+            ## The story page does not give much Metadata, so we go to
+            ## the Author's page.  Except it's actually a sub-req for
+            ## list of author's stories for that subdomain
+            author_Url = 
'https://members.{0}/load-user-stories.php?subdomain={1}&uid={2}'.format(
+                self.getBaseDomain(),
+                self.zone,
+                self.story.getMetadata('authorId'))
 
-            ##I'm resetting the author page to the zone for this story
-            self.story.setMetadata('authorUrl',author_Url)
-
-            logger.debug('Getting the author page: {0}'.format(author_Url))
+            logger.debug('Getting the load-user-stories page: 
{0}'.format(author_Url))
             adata = self.get_request(author_Url)
 
-            if "The member you are looking for does not exist." in adata:
-                raise exceptions.StoryDoesNotExist("{0}.{1} says: The member 
you are looking for does not exist.".format(self.zone, self.getBaseDomain()))
-                #raise 
exceptions.StoryDoesNotExist(self.zone+'.'+self.getBaseDomain() +" says: The 
member you are looking for does not exist.")
+            none_found = "No stories found in this category."
+            if none_found in adata:
+                raise exceptions.StoryDoesNotExist("{0}.{1} says: 
{2}".format(self.zone, self.getBaseDomain(), none_found))
 
             asoup = self.make_soup(adata)
+            # logger.debug(asoup)
+
+            story_card = 
asoup.select_one('div.story-card:has(a[href="{0}"])'.format(url))
+            # logger.debug(story_card)
 
-            ##Getting the number of author pages
-            pages = 0
-            pagination=asoup.find('ul',{'class' : 'pagination'})
-            if pagination:
-                pages = pagination.find_all('li')[-1].find('a')
-                if not pages == None:
-                    pages = pages['href'].split('=')[-1]
-                else:
-                    pages = 0
-
-            storya = None
-            ##If there is only 1 page of stories, check it to get the Metadata,
-            if pages == 0:
-                a = asoup.find_all('li')
-                for lc2 in a:
-                    if lc2.find('a', 
href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$")):
-                        storya = lc2
-                        break
-            ## otherwise go through the pages
-            else:
-                page=1
-                i=0
-                while i == 0:
-                    ##We already have the first page, so if this is the first 
time through, skip getting the page
-                    if page != 1:
-                        author_Url = 
'{0}&view=story&zone={1}&page={2}'.format(self.story.getMetadata('authorUrl'), 
self.zone, unicode(page))
-                        logger.debug('Getting the author page: 
{0}'.format(author_Url))
-                        adata = self.get_request(author_Url)
-                        ##This will probably never be needed, since AFF 
doesn't seem to care what number you put as
-                        ## the page number, it will default to the last page, 
even if you use 1000, for an author
-                        ## that only hase 5 pages of stories, but I'm keeping 
it in to appease Saint Justin Case (just in case).
-                        if "The member you are looking for does not exist." in 
adata:
-                            raise exceptions.StoryDoesNotExist("{0}.{1} says: 
The member you are looking for does not exist.".format(self.zone, 
self.getBaseDomain()))
-                    # we look for the li element that has the story here
-                    asoup = self.make_soup(adata)
-
-                    a = asoup.find_all('li')
-                    for lc2 in a:
-                        if lc2.find('a', 
href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$")):
-                            i=1
-                            storya = lc2
-                            break
-                    page = page + 1
-                    if page > int(pages):
-                        break
-
-            ##Split the Metadata up into a list
-            ##We have to change the soup type to a string, then remove the 
newlines, and double spaces,
-            ##then changes the <br/> to '-:-', which seperates the different 
elemeents.
-            ##Then we strip the HTML elements from the string.
-            ##There is also a double <br/>, so we have to fix that, then 
remove the leading and trailing '-:-'.
-            ##They are always in the same order.
-            ## EDIT 09/26/2016: Had some trouble with unicode errors... so I 
had to put in the decode/encode parts to fix it
-            liMetadata = 
unicode(storya).replace('\n','').replace('\r','').replace('\t',' ').replace('  
',' ').replace('  ',' ').replace('  ',' ')
-            liMetadata = 
stripHTML(liMetadata.replace(r'<br/>','-:-').replace('<!-- <br /-->','-:-'))
-            liMetadata = liMetadata.strip('-:-').strip('-:-').encode('utf-8')
-            for i, value in enumerate(liMetadata.decode('utf-8').split('-:-')):
-                if i == 0:
-                    # The value for the title has been manipulated, so may not 
be the same as gotten at the start.
-                    # I'm going to use the href from the storya retrieved from 
the author's page to determine if it is correct.
-                    if storya.find('a', 
href=re.compile(r'story.php\?no='+self.story.getMetadata('storyId')+"$"))['href']
 != url:
-                        raise exceptions.StoryDoesNotExist('Did not find story 
in author story list: {0}'.format(author_Url))
-                elif i == 1:
-                    ##Get the description
-                    self.setDescription(url,stripHTML(value.strip()))
-                else:
-                    # the rest of the values can be missing, so instead of 
hardcoding the numbers, we search for them.
-                    if 'Located :' in value:
-                        
self.story.setMetadata('category',value.replace(r'&gt;',r'>').replace(r'Located 
:',r'').strip())
-                    elif 'Category :' in value:
-                        # Get the Category
-                        
self.story.setMetadata('category',value.replace(r'&gt;',r'>').replace(r'Located 
:',r'').strip())
-                    elif 'Content Tags :' in value:
-                        # Get the Erotic Tags
-                        value = stripHTML(value.replace(r'Content Tags 
:',r'')).strip()
-                        for code in re.split(r'\s',value):
-                            self.story.addToList('eroticatags',code)
-                    elif 'Posted :' in value:
-                        # Get the Posted Date
-                        value = value.replace(r'Posted :',r'').strip()
-                        if value.startswith('008'):
-                            # It is unknown how the 200 became 008, but I'm 
going to change it back here
-                            value = value.replace('008','200')
-                        elif value.startswith('0000'):
-                            # Since the date is showing as 0000,
-                            # I'm going to put the memberdate here
-                            value = 
asoup.find('div',{'id':'contentdata'}).find('p').get_text(strip=True).replace('Member
 Since','').strip()
-                        self.story.setMetadata('datePublished', 
makeDate(stripHTML(value), self.dateformat))
-                    elif 'Edited :' in value:
-                        # Get the 'Updated' Edited date
-                        # AFF has the time for the Updated date, and we only 
want the date,
-                        # so we take the first 10 characters only
-                        value = value.replace(r'Edited :',r'').strip()[0:10]
-                        if value.startswith('008'):
-                            # It is unknown how the 200 became 008, but I'm 
going to change it back here
-                            value = value.replace('008','200')
-                            self.story.setMetadata('dateUpdated', 
makeDate(stripHTML(value), self.dateformat))
-                        elif value.startswith('0000') or '-00-' in value:
-                            # Since the date is showing as 0000,
-                            # or there is -00- in the date,
-                            # I'm going to put the Published date here
-                            self.story.setMetadata('dateUpdated', 
self.story.getMetadata('datPublished'))
-                        else:
-                            self.story.setMetadata('dateUpdated', 
makeDate(stripHTML(value), self.dateformat))
-                    else:
-                        # This catches the blank elements, and the Review and 
Dragon Prints.
-                        # I am not interested in these, so do nothing
-                        zzzzzzz=0
+            ## Category
+            ## I've only seen one category per story so far, but just in case:
+            for cat in story_card.select('div.story-card-category'):
+                # remove Category:, old code suggests Located: is also
+                # possible, so removing by <strong>
+                cat.find("strong").decompose()
+                self.story.addToList('category',stripHTML(cat))
+
+            
self.setDescription(url,story_card.select_one('div.story-card-description'))
+
+            for tag in story_card.select('span.story-tag'):
+                self.story.addToList('eroticatags',stripHTML(tag))
+
+            ## created/updates share formatting
+            for meta in story_card.select('div.story-card-meta-item 
span:last-child'):
+                meta = stripHTML(meta)
+                if 'Created: ' in meta:
+                    meta = meta.replace('Created: ','')
+                    self.story.setMetadata('datePublished', makeDate(meta, 
self.dateformat))
+
+                if 'Updated: ' in meta:
+                    meta = meta.replace('Updated: ','')
+                    self.story.setMetadata('dateUpdated', makeDate(meta, 
self.dateformat))
 
     # grab the text for an individual chapter.
     def getChapterText(self, url):
@@ -373,10 +241,11 @@
         logger.debug('Getting chapter text from: %s' % url)
 
         soup = self.make_soup(self.get_request(url))
-        chaptertag = 
soup.find('ul',{'class':'pagination'}).parent.parent.parent.findNextSibling('li')
+        chaptertag = soup.select_one('div.chapter-body')
         if None == chaptertag:
             raise exceptions.FailedToDownload("Error downloading Chapter: {0}! 
 Missing required element!".format(url))
-        # Change td to a div.
-        chaptertag.name='div'
+        ## chapter text includes a copy of story title, author,
+        ## chapter title, & eroticatags specific to the chapter.  Did
+        ## before, too.
 
         return self.utf8FromSoup(url,chaptertag)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_bloodshedversecom.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_bloodshedversecom.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_bloodshedversecom.py      
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_bloodshedversecom.py      
2026-03-01 16:25:11.000000000 +0100
@@ -157,9 +157,6 @@
 
                         self.story.addToList('warnings', warning)
 
-                elif key == 'Chapters':
-                    self.story.setMetadata('numChapters', int(value))
-
                 elif key == 'Words':
                     # Apparently only numChapters need to be an integer for
                     # some strange reason. Remove possible ',' characters as to
@@ -174,7 +171,7 @@
                     # ugly %p(am/pm) hack moved into makeDate so other sites 
can use it.
                     self.story.setMetadata('dateUpdated', date)
 
-        if self.story.getMetadata('rating') == 'NC-17' and not (self.is_adult 
or self.getConfig('is_adult')):
+        if self.story.getMetadataRaw('rating') == 'NC-17' and not 
(self.is_adult or self.getConfig('is_adult')):
             raise exceptions.AdultCheckRequired(self.url)
 
     def getChapterText(self, url):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fanficauthorsnet.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fanficauthorsnet.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fanficauthorsnet.py       
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fanficauthorsnet.py       
2026-03-01 16:25:11.000000000 +0100
@@ -202,7 +202,7 @@
         ## Raising AdultCheckRequired after collecting chapters gives
         ## a double chapter list.  So does genre, but it de-dups
         ## automatically.
-        if( self.story.getMetadata('rating') == 'Mature'
+        if( self.story.getMetadataRaw('rating') in ['Mature','Adult Only']
             and not (self.is_adult or self.getConfig("is_adult")) ):
             raise exceptions.AdultCheckRequired(self.url)
 
@@ -226,7 +226,7 @@
     # grab the text for an individual chapter.
     def getChapterText(self, url):
         logger.debug('Getting chapter text from: %s' % url)
-        if( self.story.getMetadata('rating') == 'Mature' and
+        if( self.story.getMetadataRaw('rating') in ['Mature','Adult Only'] and
             (self.is_adult or self.getConfig("is_adult")) ):
             addurl = "?bypass=1"
         else:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fanficsme.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fanficsme.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fanficsme.py      
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fanficsme.py      
2026-03-01 16:25:11.000000000 +0100
@@ -150,7 +150,7 @@
         
self.story.setMetadata('rating',stripHTML(get_meta_content(u'Рейтинг')))
 
         ## Need to login for any rating higher than General.
-        if self.story.getMetadata('rating') != 'General' and 
self.needToLoginCheck(data):
+        if self.story.getMetadataRaw('rating') != 'General' and 
self.needToLoginCheck(data):
             self.performLogin(url)
             # reload after login.
             data = self.get_request(url,usecache=False)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_ficbooknet.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_ficbooknet.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_ficbooknet.py     
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_ficbooknet.py     
2026-03-01 16:25:11.000000000 +0100
@@ -157,7 +157,6 @@
                 update = chapterdate
         else:
             self.add_chapter(self.story.getMetadata('title'),url)
-            self.story.setMetadata('numChapters',1)
             date_str = soup.find('div', {'class' : 'part-date'}).find('span', 
{'title': True})['title'].replace(u"\u202fг. в", "")
             for month_name, month_num in fullmon.items():
                 date_str = date_str.replace(month_name, month_num)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fictionlive.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fictionlive.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fictionlive.py    
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fictionlive.py    
2026-03-01 16:25:11.000000000 +0100
@@ -173,7 +173,7 @@
 
         tags = data['ta'] if 'ta' in data else []
 
-        if (self.story.getMetadata('rating') in {"nsfw", "adult"} or 'smut' in 
tags) and \
+        if (self.story.getMetadataRaw('rating') in {"nsfw", "adult"} or 'smut' 
in tags) and \
            not (self.is_adult or self.getConfig("is_adult")):
             raise exceptions.AdultCheckRequired(self.url)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fictionmaniatv.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fictionmaniatv.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fictionmaniatv.py 
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fictionmaniatv.py 
2026-03-01 16:25:11.000000000 +0100
@@ -40,10 +40,6 @@
         self._setURL(self.READ_TEXT_STORY_URL_TEMPLATE % story_id)
         self.story.setMetadata('siteabbrev', self.SITE_ABBREVIATION)
 
-        # Always single chapters, probably should use the Anthology feature to
-        # merge chapters of a story
-        self.story.setMetadata('numChapters', 1)
-
     @staticmethod
     def getSiteDomain():
         return FictionManiaTVAdapter.SITE_DOMAIN
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_ficwadcom.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_ficwadcom.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_ficwadcom.py      
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_ficwadcom.py      
2026-03-01 16:25:11.000000000 +0100
@@ -114,7 +114,7 @@
         titleh4 = soup.find('div',{'class':'storylist'}).find('h4')
         self.story.setMetadata('title', stripHTML(titleh4.a))
 
-        if 'Deleted story' in self.story.getMetadata('title'):
+        if 'Deleted story' in self.story.getMetadataRaw('title'):
             raise exceptions.StoryDoesNotExist("This story was deleted. 
%s"%self.url)
 
         # Find authorid and URL from... author url.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fimfictionnet.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fimfictionnet.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fimfictionnet.py  
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fimfictionnet.py  
2026-03-01 16:25:11.000000000 +0100
@@ -151,7 +151,8 @@
         self.story.setMetadata("authorId", author['href'].split('/')[2])
         self.story.setMetadata("authorUrl", "https://%s/user/%s/%s"; % 
(self.getSiteDomain(),
                                                                        
self.story.getMetadata('authorId'),
-                                                                       
self.story.getMetadata('author')))
+                                                                       # meta 
entry author can be changed by the user.
+                                                                       
stripHTML(author)))
 
         #Rating text is replaced with full words for historical compatibility 
after the site changed
         #on 2014-10-27
@@ -183,7 +184,8 @@
             if storyImage:
                 coverurl = storyImage['data-fullsize']
                 # try setting from data-fullsize, if fails, try using data-src
-                if 
self.setCoverImage(self.url,coverurl)[0].startswith("failedtoload"):
+                cover_set = self.setCoverImage(self.url,coverurl)[0]
+                if not cover_set or cover_set.startswith("failedtoload"):
                     coverurl = storyImage['src']
                     self.setCoverImage(self.url,coverurl)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fireflyfansnet.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fireflyfansnet.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_fireflyfansnet.py 
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_fireflyfansnet.py 
2026-03-01 16:25:11.000000000 +0100
@@ -105,7 +105,6 @@
         # to download them one at a time yourself. I'm also setting the status 
to
         # complete
         self.add_chapter(self.story.getMetadata('title'), self.url)
-        self.story.setMetadata('numChapters', 1)
         self.story.setMetadata('status', 'Completed')
 
         ## some stories do not have a summary listed, so I'm setting it here.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_kakuyomujp.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_kakuyomujp.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_kakuyomujp.py     
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_kakuyomujp.py     
2026-03-01 16:25:11.000000000 +0100
@@ -197,8 +197,6 @@
                     self.add_chapter(epTitle, epUrl)
                 newSection = False
 
-        self.story.setMetadata('numChapters', numEpisodes)
-
         logger.debug("Story: <%s>", self.story)
         return
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_literotica.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_literotica.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_literotica.py     
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_literotica.py     
2026-03-01 16:25:11.000000000 +0100
@@ -99,7 +99,7 @@
     ## apply clean_chapter_titles
     def add_chapter(self,chapter_title,url,othermeta={}):
         if self.getConfig("clean_chapter_titles"):
-            storytitle = self.story.getMetadata('title').lower()
+            storytitle = self.story.getMetadataRaw('title').lower()
             chapter_name_type = None
             # strip trailing ch or pt before doing the chapter clean.
             # doesn't remove from story title metadata
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_masseffect2in.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_masseffect2in.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_masseffect2in.py  
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_masseffect2in.py  
2026-03-01 16:25:11.000000000 +0100
@@ -162,7 +162,7 @@
                     self.story.extendList('authorId', [authorId])
                     self.story.extendList('authorUrl', [authorUrl])
 
-                if not self.story.getMetadata('rating'):
+                if not self.story.getMetadataRaw('rating'):
                     ratingTitle = chapter.getRatingTitle()
                     if ratingTitle:
                         self.story.setMetadata('rating', ratingTitle)
@@ -204,7 +204,6 @@
         self.story.setMetadata('datePublished', datePublished)
         self.story.setMetadata('dateUpdated', dateUpdated)
         self.story.setMetadata('numWords', unicode(wordCount))
-        self.story.setMetadata('numChapters', len(chapters))
 
         # Site-specific metadata.
         self.story.setMetadata('language', self.SITE_LANGUAGE)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_royalroadcom.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_royalroadcom.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_royalroadcom.py   
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_royalroadcom.py   
2026-03-01 16:25:11.000000000 +0100
@@ -289,7 +289,8 @@
         if img:
             cover_url = img['src']
             # usually URL is for thumbnail. Try expected URL for larger image, 
if fails fall back to the original URL
-            if self.setCoverImage(url,cover_url.replace('/covers-full/', 
'/covers-large/'))[0].startswith("failedtoload"):
+            cover_set = 
self.setCoverImage(url,cover_url.replace('/covers-full/', '/covers-large/'))[0]
+            if not cover_set or cover_set.startswith("failedtoload"):
                 self.setCoverImage(url,cover_url)
                     # some content is show as tables, this will preserve them
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_swiorgru.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_swiorgru.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_swiorgru.py       
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_swiorgru.py       
1970-01-01 01:00:00.000000000 +0100
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-import logging
-logger = logging.getLogger(__name__)
-import re
-
-
-from ..htmlcleanup import stripHTML
-from .. import exceptions as exceptions
-
-# py2 vs py3 transition
-
-from .base_adapter import BaseSiteAdapter,  makeDate
-
-
-def getClass():
-    return SwiOrgRuAdapter
-
-
-logger = logging.getLogger(__name__)
-
-class SwiOrgRuAdapter(BaseSiteAdapter):
-
-    def __init__(self, config, url):
-        BaseSiteAdapter.__init__(self, config, url)
-
-        self.username = "NoneGiven" # if left empty, site doesn't return any 
message at all.
-        self.password = ""
-        self.is_adult=False
-        storyId = self.parsedUrl.path.split('/',)[3]
-        self.story.setMetadata('storyId', storyId)
-
-        # normalized story URL.
-        self._setURL('http://' + self.getSiteDomain() + 
'/mlp-fim/story/'+self.story.getMetadata('storyId'))
-
-        # Each adapter needs to have a unique site abbreviation.
-        self.story.setMetadata('siteabbrev','swiorgru')
-
-        # The date format will vary from site to site.
-        # 
http://docs.python.org/library/datetime.html#strftime-strptime-behavior
-        self.dateformat = "%Y.%m.%d"
-
-
-    @staticmethod # must be @staticmethod, don't remove it.
-    def getSiteDomain():
-        return 'www.swi.org.ru'
-
-    @classmethod
-    def getSiteExampleURLs(cls):
-        return "http://"; + cls.getSiteDomain() + "/mlp-fim/story/11341/ 
http://"; + cls.getSiteDomain() + "/mlp-fim/story/11341/chapter1.html"
-
-    def getSiteURLPattern(self):
-        return r"http://"; + re.escape(self.getSiteDomain() + 
"/mlp-fim/story/")+r"\d+"
-
-    def extractChapterUrlsAndMetadata(self):
-        url=self.url
-        logger.debug("URL: "+url)
-        data = self.get_request(url)
-
-        soup = self.make_soup(data)
-
-        title = soup.find('h1')
-        for tag in title.find_all('sup'):
-            tag.extract()
-
-        self.story.setMetadata('title', stripHTML(title.text))
-        logger.debug("Title: (%s)"%self.story.getMetadata('title'))
-
-        author_title = soup.find('strong', string = re.compile(u"Автор: "))
-        if author_title == None:
-            raise exceptions.FailedToDownload("Error downloading page: %s! 
Missing required author_title element!" % url)
-
-        author = author_title.next_sibling
-
-        self.story.setMetadata('authorId', author.text) # Author's name is 
unique
-        self.story.setMetadata('authorUrl','http://'+self.host + 
author['href'])
-        self.story.setMetadata('author', author.text)
-        logger.debug("Author: (%s)"%self.story.getMetadata('author'))
-
-        date_pub = soup.find('em', string = re.compile(r'\d{4}.\d{2}.\d{2}'))
-        if not date_pub == None:
-            self.story.setMetadata('datePublished', makeDate(date_pub.text, 
self.dateformat))
-
-        rating_label = soup.find('strong', string = re.compile(u"рейтинг:"))
-        if not rating_label == None:
-            rating = rating_label.next_sibling.next_sibling
-            self.story.setMetadata('rating', stripHTML(rating))
-
-            if not self.is_adult or self.getConfig("is_adult"):
-                if "NC-18" in rating:
-                    raise exceptions.AdultCheckRequired(self.url)
-
-        characters = soup.find_all('img', 
src=re.compile(r"/mlp-fim/img/chars/\d+.png"))
-        logger.debug("numCharacters: (%s)"%str(len(characters)))
-
-        for x in range(0,len(characters)):
-            character=characters[x]
-            self.story.addToList('characters', character['title'])
-
-        if soup.find('font', color = r"green", string = u"завершен"):
-            self.story.setMetadata('status', 'Completed')
-        else:
-            self.story.setMetadata('status', 'In-Progress')
-
-        categories_label = soup.find('strong', string = u"категории:")
-        if not categories_label == None:
-            categories_element = categories_label.next_sibling.next_sibling
-            categories = re.findall(r'"(.+?)"', categories_element.text)
-            for x in range(0, len(categories)):
-                category=categories[x]
-                self.story.addToList('category', category)
-
-        chapters_header = soup.find('h2', string = re.compile(u"Главы:"))
-        if chapters_header==None:
-            raise exceptions.FailedToDownload("Error downloading page: %s! 
Missing required chapters_header element!" % url)
-
-        chapters_table = chapters_header.next_sibling.next_sibling
-
-        self.story.setMetadata('language','Russian')
-
-        chapters=chapters_table.find_all('a', 
href=re.compile(r'/mlp-fim/story/'+self.story.getMetadata('storyId')+r"/chapter\d+"))
-        self.story.setMetadata('numChapters', len(chapters))
-        logger.debug("numChapters: 
(%s)"%str(self.story.getMetadata('numChapters')))
-
-        for x in range(0,len(chapters)):
-                chapter=chapters[x]
-                churl='http://'+self.host+chapter['href']
-                self.add_chapter(chapter,churl)
-
-    # grab the text for an individual chapter.
-    def getChapterText(self, url):
-        logger.debug('Getting chapter text from: %s' % url)
-        soup = self.make_soup(self.get_request(url))
-        chapter = soup.find('div', {'id' : 'content'})
-
-        chapter_header = chapter.find('h1', id = re.compile("chapter"))
-        if not chapter_header == None:
-            chapter_header.decompose()
-
-        if chapter == None:
-            raise exceptions.FailedToDownload("Error downloading Chapter: %s!  
Missing required element!" % url)
-
-        return self.utf8FromSoup(url,chapter)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_syosetucom.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_syosetucom.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_syosetucom.py     
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_syosetucom.py     
2026-03-01 16:25:11.000000000 +0100
@@ -255,7 +255,6 @@
             numChapters = int(re.sub(r'[^\d]', '', infoSoup.find('span', 
{'class':'p-infotop-type__allep'}).text.strip()))
             oneshot = False
             completed = True if noveltype == '完結済' else False
-        self.story.setMetadata('numChapters', numChapters)
         self.story.setMetadata('status', 'Completed' if completed else 
'In-Progress')
 
         # Keywords
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_test1.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_test1.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_test1.py  2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_test1.py  2026-03-01 
16:25:11.000000000 +0100
@@ -149,20 +149,20 @@
 
         # greater than 10, no language or series.
         if idnum < 10:
-            ## non-English was changing series sort order which
-            ## confuses me more often than I test other langs.
-            # langs = {
-            #     0:"English",
-            #     1:"Russian",
-            #     2:"French",
-            #     3:"German",
-            #     }
-            # self.story.setMetadata('language',langs[idnum%len(langs)])
             self.setSeries('The Great Test',idnum)
             
self.story.setMetadata('seriesUrl','http://'+self.getSiteDomain()+'/seriesid=1')
         elif idnum < 20:
             self.setSeries('魔法少女まどか★マギカ',idnum)
             
self.story.setMetadata('seriesUrl','http://'+self.getSiteDomain()+'/seriesid=1')
+        elif idnum < 30:
+            langs = {
+                0:"English",
+                1:"Russian",
+                2:"French",
+                3:"German",
+                }
+            self.story.setMetadata('language',langs[idnum%len(langs)])
+
         if idnum == 0:
             self.setSeries("A Nook Hyphen Test 
"+self.story.getMetadata('dateCreated'),idnum)
             
self.story.setMetadata('seriesUrl','http://'+self.getSiteDomain()+'/seriesid=0')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_touchfluffytail.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_touchfluffytail.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_touchfluffytail.py        
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_touchfluffytail.py        
2026-03-01 16:25:11.000000000 +0100
@@ -101,7 +101,6 @@
 
         self.story.setMetadata('status', 'Completed')
         self.add_chapter(self.story.getMetadata('title'),url)
-        self.story.setMetadata('numChapters',1)
 
         avrrate = body.find_all('footer', 
class_='entry-meta')[1].find('em').span.find_all('strong')
         averrating = avrrate[1].text
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_trekfanfictionnet.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_trekfanfictionnet.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_trekfanfictionnet.py      
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_trekfanfictionnet.py      
2026-03-01 16:25:11.000000000 +0100
@@ -126,11 +126,6 @@
         ## url since we can't get the chapter without this, I'm leaving it in.
         self.add_chapter(self.story.getMetadata('title'), url)
 
-        ## I'm going to comment this out, because thereis always only one 
chapter for each story,
-        ## so this is really not needed
-        ## And I am uncommenting it because the rest of FFF expects
-        ## there to always be numChapters, even if it's one. --Jimm
-
         # getting the rest of the metadata... there isn't much here, and the 
summary can only be
         # gotten on the author's page... so we'll get it to get the 
information from
         adata = self.get_request(self.story.getMetadata('authorUrl'))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/adapter_voracity2eficcom.py 
new/FanFicFare-4.55.0/fanficfare/adapters/adapter_voracity2eficcom.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/adapter_voracity2eficcom.py       
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/adapter_voracity2eficcom.py       
2026-03-01 16:25:11.000000000 +0100
@@ -199,9 +199,6 @@
                 self.story.setMetadata('series', a.string)
                 self.story.setMetadata('seriesUrl', 
urlparse.urljoin(self.BASE_URL, a['href']))
 
-            elif key == 'Chapter':
-                self.story.setMetadata('numChapters', int(value))
-
             elif key == 'Completed':
                 self.story.setMetadata('status', 'Completed' if value == 'Yes' 
else 'In-Progress')
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/base_adapter.py 
new/FanFicFare-4.55.0/fanficfare/adapters/base_adapter.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/base_adapter.py   2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/base_adapter.py   2026-03-01 
16:25:11.000000000 +0100
@@ -670,6 +670,7 @@
         return url in self.add_img_names
 
     def include_css_urls(self,parenturl,style):
+        FONT_EXTS = ('ttf','otf','woff','woff2')
         # logger.debug("include_css_urls(%s,%s)"%(parenturl,style))
         ## pass in the style string, will be returned with URLs
         ## replaced and images will be added.
@@ -680,12 +681,16 @@
             ## url('href')
             ## the pattern will also accept mismatched '/", which is broken 
CSS.
             for style_url in re.findall(r'url\([\'"]?(.*?)[\'"]?\)', style):
-                logger.debug("Adding style url(%s)"%style_url)
                 ## additional_images don't get processing.  Applies
                 ## only to CSS url(), that should be the only time
                 ## additional_images is used.
                 if self.is_additional_image(style_url):
+                    logger.debug("Skipping sheet style url(%s), in 
additional_images"%style_url)
+                    continue
+                if style_url.rsplit('.')[-1].lower() in FONT_EXTS:
+                    logger.debug("Skipping sheet style url(%s), assumed 
font"%style_url)
                     continue
+                logger.debug("Adding style url(%s)"%style_url)
 
                 try:
                     # longdesc(aka origurl) isn't saved anywhere in CSS.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/base_efiction_adapter.py 
new/FanFicFare-4.55.0/fanficfare/adapters/base_efiction_adapter.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/base_efiction_adapter.py  
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/base_efiction_adapter.py  
2026-03-01 16:25:11.000000000 +0100
@@ -317,8 +317,6 @@
             for val in re.split(r"\s*,\s*", value):
                 # TODO this should be an official field I guess
                 self.story.addToList('challenge', val)
-        elif key == 'Chapters':
-            self.story.setMetadata('numChapters', int(value))
         elif key == 'Rating' or key == 'Rated':
             self.story.setMetadata('rating', value)
         elif key == 'Word count':
@@ -446,7 +444,7 @@
         if sn:
             self.story.setMetadata('storynotes', stripHTML(sn))
 
-        if not self.story.getMetadata('rating'):
+        if not self.story.getMetadataRaw('rating'):
             self.getRatingFromTOC();
 
         ## Chapter URLs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/FanFicFare-4.54.0/fanficfare/adapters/base_otw_adapter.py 
new/FanFicFare-4.55.0/fanficfare/adapters/base_otw_adapter.py
--- old/FanFicFare-4.54.0/fanficfare/adapters/base_otw_adapter.py       
2026-02-01 16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/adapters/base_otw_adapter.py       
2026-03-01 16:25:11.000000000 +0100
@@ -320,7 +320,6 @@
         # break epub update.
         # Find the chapters:
         chapters=soup.find_all('a', 
href=re.compile(r'/works/'+self.story.getMetadata('storyId')+r"/chapters/\d+$"))
-        self.story.setMetadata('numChapters',len(chapters))
         logger.debug("numChapters: (%s)"%self.story.getMetadata('numChapters'))
         if len(chapters)==1:
             
self.add_chapter(self.story.getMetadata('title'),'https://'+self.host+chapters[0]['href'])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/cli.py 
new/FanFicFare-4.55.0/fanficfare/cli.py
--- old/FanFicFare-4.54.0/fanficfare/cli.py     2026-02-01 16:04:34.000000000 
+0100
+++ new/FanFicFare-4.55.0/fanficfare/cli.py     2026-03-01 16:25:11.000000000 
+0100
@@ -28,7 +28,7 @@
 import os, sys, platform
 
 
-version="4.54.0"
+version="4.55.0"
 os.environ['CURRENT_VERSION_ID']=version
 
 global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/configurable.py 
new/FanFicFare-4.55.0/fanficfare/configurable.py
--- old/FanFicFare-4.54.0/fanficfare/configurable.py    2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/configurable.py    2026-03-01 
16:25:11.000000000 +0100
@@ -139,19 +139,6 @@
             allowedsections.append('%s:%s'%(section,f))
     return allowedsections
 
-def get_valid_list_entries():
-    return list(['category',
-                 'genre',
-                 'characters',
-                 'ships',
-                 'warnings',
-                 'extratags',
-                 'author',
-                 'authorId',
-                 'authorUrl',
-                 'lastupdate',
-                 ])
-
 boollist=['true','false']
 base_xenforo2_list=['base_xenforo2forum',
                    'forums.sufficientvelocity.com',
@@ -188,7 +175,7 @@
 
     This is to further restrict keywords to certain sections and/or
     values.  get_valid_keywords() below is the list of allowed
-    keywords.  Any keyword listed here must also be listed there.
+    keywords.  Any keyword not listed here must be listed there.
 
     This is what's used by the code when you save personal.ini in
     plugin that stops and points out possible errors in keyword
@@ -343,145 +330,84 @@
 
     return dict(valdict)
 
-def get_valid_scalar_entries():
-    return list(['series',
-                 'seriesUrl',
-                 'language',
-                 'status',
-                 'datePublished',
-                 'dateUpdated',
-                 'dateCreated',
-                 'rating',
-                 'numChapters',
-                 'numWords',
-                 'words_added', # logpage only.
-                 'marked_new_chapters',
-                 'site',
-                 'publisher',
-                 'storyId',
-                 'title',
-                 'titleHTML',
-                 'storyUrl',
-                 'sectionUrl',
-                 'description',
-                 'formatname',
-                 'formatext',
-                 'siteabbrev',
-                 'version',
-                 # internal stuff.
-                 'authorHTML',
-                 'seriesHTML',
-                 'langcode',
-                 'output_css',
-                 'cover_image',
-                 'newforanthology' # internal for plugin anthologies
-                                   # to mark chapters (new) in new
-                                   # stories
-                 ])
-
-def get_valid_entries():
-    return get_valid_list_entries() + get_valid_scalar_entries()
-
 # *known* keywords -- or rather regexps for them.
 def get_valid_keywords():
     '''
     Among other things, this list is used by the color highlighting in
-    personal.ini editing in plugin.  Note that it's separate from
-    value checking and most keywords need to be added to both.
+    personal.ini editing in plugin.  Note that entries in
+    get_valid_set_options() do not need to be duplicated here anymore.
     '''
-    return list(['(in|ex)clude_metadata_(pre|post)',
-                 'add_chapter_numbers',
-                 'add_genre_when_multi_category',
+    return list(get_valid_set_options().keys())+\
+                ['(in|ex)clude_metadata_(pre|post)',
                  'add_category_when_multi_category',
+                 'add_genre_when_multi_category',
                  'adult_ratings',
                  'allow_unsafe_filename',
                  'always_overwrite',
+                 'anthology_merge_keepsingletocs',
                  'anthology_tags',
                  'anthology_title_pattern',
-                 'anthology_merge_keepsingletocs',
                  'background_color',
-                 'bulk_load',
+                 'browser_cache_age_limit',
                  'chapter_end',
                  'chapter_start',
-                 'chapter_title_strip_pattern',
-                 'chapter_title_def_pattern',
                  'chapter_title_add_pattern',
-                 'chapter_title_new_pattern',
                  'chapter_title_addnew_pattern',
-                 'title_chapter_range_pattern',
-                 'mark_new_chapters',
-                 'check_next_chapter',
-                 'meta_from_last_chapter',
-                 'skip_author_cover',
-                 'try_shortened_title_urls',
-                 'collect_series',
+                 'chapter_title_def_pattern',
+                 'chapter_title_error_mark',
+                 'chapter_title_new_pattern',
+                 'chapter_title_strip_pattern',
+                 'chardet_confidence_limit',
                  'comma_entries',
                  'connect_timeout',
+                 'continue_on_chapter_error_try_limit',
                  'convert_images_to',
                  'cover_content',
                  'cover_exclusion_regexp',
+                 'cover_min_size',
                  'custom_columns_settings',
                  'dateCreated_format',
                  'datePublished_format',
                  'dateUpdated_format',
+                 'datethreadmark_format',
                  'default_cover_image',
-                 'force_cover_image',
-                 'force_img_self_referer_regexp',
                  'description_limit',
-                 'do_update_hook',
-                 'use_archived_author',
-                 'use_view_full_work',
-                 'use_workskin',
-                 'always_login',
-                 'exclude_notes',
-                 'remove_authorfootnotes_on_update',
-                 'use_archive_transformativeworks_org',
-                 'use_archiveofourown_gay',
+                 'epub_version',
                  'exclude_editor_signature',
+                 'exclude_notes',
                  'extra_logpage_entries',
                  'extra_subject_tags',
                  'extra_titlepage_entries',
                  'extra_valid_entries',
-                 'extratags',
                  'extracategories',
-                 'extragenres',
                  'extracharacters',
+                 'extragenres',
                  'extraships',
+                 'extratags',
                  'extrawarnings',
                  'fail_on_password',
                  'file_end',
                  'file_start',
                  'fileformat',
                  'find_chapters',
-                 'fix_fimf_blockquotes',
-                 'keep_prequel_in_description',
-                 'scrape_bookshelf',
-                 'include_author_notes',
+                 'fix_pseudo_html',
+                 'flaresolverr_proxy_address',
+                 'flaresolverr_proxy_port',
+                 'flaresolverr_proxy_protocol',
+                 'flaresolverr_proxy_timeout',
+                 'flaresolverr_session',
+                 'force_cover_image',
+                 'force_img_self_referer_regexp',
                  'force_login',
                  'generate_cover_settings',
-                 'grayscale_images',
+                 'http_proxy',
+                 'https_proxy',
+                 'ignore_chapter_url_list',
                  'image_max_size',
-                 'include_images',
-                 'jpg_quality',
-                 'additional_images',
-                 'include_logpage',
-                 'logpage_at_end',
-                 'calibre_series_meta',
-                 'force_update_epub_always',
-                 'page_progression_direction_rtl',
                  'include_subject_tags',
-                 'include_titlepage',
-                 'include_tocpage',
-                 'chardet_confidence_limit',
-                 'is_adult',
                  'join_string_authorHTML',
-                 'keep_style_attr',
-                 'keep_title_attr',
-                 'keep_html_attrs',
-                 'remove_class_chapter',
-                 'replace_tags_with_spans',
                  'keep_empty_tags',
-                 'remove_tags',
+                 'keep_html_attrs',
                  'keep_summary_html',
                  'logpage_end',
                  'logpage_entries',
@@ -490,156 +416,127 @@
                  'logpage_update_end',
                  'logpage_update_start',
                  'make_directories',
-                 'make_firstimage_cover',
-                 'use_old_cover',
                  'make_linkhtml_entries',
                  'max_fg_sleep',
                  'max_fg_sleep_at_downloads',
+                 'max_zalgo',
                  'min_fg_sleep',
-                 'never_make_cover',
-                 'cover_min_size',
-                 'no_image_processing',
                  'no_image_processing_regexp',
-                 'dedup_img_files',
-                 'convert_inline_images',
-                 'non_breaking_spaces',
-                 'download_text_version',
-                 'nook_img_fix',
+                 'nsapa_proxy_address',
+                 'nsapa_proxy_port',
+                 'order_threadmarks_by_date_categories',
                  'output_css',
                  'output_filename',
                  'output_filename_safepattern',
                  'password',
                  'post_process_cmd',
                  'rating_titles',
+                 'reader_posts_per_page',
+                 'remove_tags',
                  'remove_transparency',
-                 'replace_br_with_p',
                  'replace_chapter_text',
-                 'replace_hr',
-                 'remove_empty_p',
-                 'replace_xbr_with_hr',
                  'replace_metadata',
+                 'replace_tags_with_spans',
+                 'replace_xbr_with_hr',
+                 'show_spoiler_tags',
+                 'skip_threadmarks_categories',
                  'slow_down_sleep_time',
-                 'sort_ships',
                  'sort_ships_splits',
-                 'strip_chapter_numbers',
                  'strip_chapter_numeral',
-                 'strip_text_links',
-                 'centeredcat_to_characters',
-                 'pairingcat_to_characters_ships',
-                 'romancecat_to_characters_ships',
-                 'use_meta_keywords',
-                 'clean_chapter_titles',
-                 'conditionals_use_lists',
-                 'description_in_chapter',
-                 'order_chapters_by_date',
-                 'fetch_stories_from_api',
-                 'tags_from_chapters',
-                 'dates_from_chapters',
-                 'include_chapter_descriptions_in_summary',
-                 'inject_chapter_title',
-                 'inject_chapter_image',
-                 'append_datepublished_to_storyurl',
-                 'auto_sub',
+                 'threadmark_category_order',
+                 'threadmarks_per_page',
+                 'title_chapter_range_pattern',
                  'titlepage_end',
                  'titlepage_entries',
                  'titlepage_entry',
                  'titlepage_no_title_entry',
                  'titlepage_start',
-                 'titlepage_use_table',
                  'titlepage_wide_entry',
                  'tocpage_end',
                  'tocpage_entry',
                  'tocpage_start',
-                 'tweak_fg_sleep',
-                 'universe_as_series',
-                 'use_ssl_unverified_context',
-                 'use_ssl_default_seclevelone',
-                 'http_proxy',
-                 'https_proxy',
-                 'use_cloudscraper',
-                 'use_basic_cache',
-                 'use_browser_cache',
-                 'use_browser_cache_only',
-                 'open_pages_in_browser',
-                 'use_nsapa_proxy',
-                 'nsapa_proxy_address',
-                 'nsapa_proxy_port',
-                 'use_flaresolverr_proxy',
-                 'flaresolverr_proxy_address',
-                 'flaresolverr_proxy_port',
-                 'flaresolverr_proxy_protocol',
-                 'flaresolverr_proxy_timeout',
-                 'use_flaresolverr_session',
-                 'flaresolverr_session',
-                 'browser_cache_path',
-                 'browser_cache_age_limit',
                  'user_agent',
                  'username',
                  'website_encodings',
                  'wide_titlepage_entries',
-                 'windows_eol',
                  'wrap_width',
                  'zip_filename',
-                 'zip_output',
-                 'capitalize_forumtags',
-                 'continue_on_chapter_error',
-                 'chapter_title_error_mark',
-                 'continue_on_chapter_error_try_limit',
-                 'minimum_threadmarks',
-                 'first_post_title',
-                 'always_include_first_post',
-                 'always_reload_first_chapter',
-                 'always_use_forumtags',
-                 'use_reader_mode',
-                 'author_avatar_cover',
-                 'reader_posts_per_page',
-                 'threadmarks_per_page',
-                 'remove_spoilers',
-                 'legend_spoilers',
-                 'details_spoilers',
-                 'apocrypha_to_omake',
-                 'skip_threadmarks_categories',
-                 'fix_relative_text_links',
-                 'normalize_text_links',
-                 'internalize_text_links',
-                 'replace_failed_smilies_with_alt_text',
-                 'use_threadmark_wordcounts',
-                 'always_include_first_post_chapters',
-                 'threadmark_category_order',
-                 'order_threadmarks_by_date',
-                 'order_threadmarks_by_date_categories',
-                 'reveal_invisible_text',
-                 'use_threadmarks_description',
-                 'use_threadmarks_status',
-                 'use_threadmarks_cover',
-                 'skip_sticky_first_posts',
-                 'include_dice_rolls',
-                 'include_nonauthor_poster',
-                 'link_embedded_media',
-                 'include_chapter_banner_images',
-                 'dateUpdated_method',
-                 'datethreadmark_format',
-                 'fix_pseudo_html',
-                 'fix_excess_space',
-                 'dedup_order_chapter_list',
-                 'ignore_chapter_url_list',
-                 'include_appendices',
-                 'dedup_chapter_list',
-                 'show_timestamps',
-                 'show_nsfw_cover_images',
-                 'show_spoiler_tags',
-                 'max_zalgo',
-                 'decode_emails',
-                 'epub_version',
-                 'prepend_section_titles',
-                 'replace_text_formatting',
-                 ])
+                 'zip_output'
+                 ]
 
 # *known* entry keywords -- or rather regexps for them.
 def get_valid_entry_keywords():
     return list(['%s_(label|format)',
                  '(default_value|include_in|join_string|keep_in_order)_%s',])
 
+def get_valid_list_entries():
+    return list(['category',
+                 'genre',
+                 'characters',
+                 'ships',
+                 'warnings',
+                 'extratags',
+                 'author',
+                 'authorId',
+                 'authorUrl',
+                 'lastupdate',
+                 ])
+
+def get_valid_scalar_entries():
+    return list(['series',
+                 'seriesUrl',
+                 'language',
+                 'status',
+                 'datePublished',
+                 'dateUpdated',
+                 'dateCreated',
+                 'rating',
+                 'numChapters',
+                 'numWords',
+                 'words_added', # logpage only.
+                 'marked_new_chapters',
+                 'site',
+                 'publisher',
+                 'storyId',
+                 'title',
+                 'titleHTML',
+                 'storyUrl',
+                 'sectionUrl',
+                 'description',
+                 'formatname',
+                 'formatext',
+                 'siteabbrev',
+                 'version',
+                 # internal stuff.
+                 'authorHTML',
+                 'seriesHTML',
+                 'langcode',
+                 'output_css',
+                 'cover_image',
+                 'newforanthology' # internal for plugin anthologies
+                                   # to mark chapters (new) in new
+                                   # stories
+                 ])
+
+def get_valid_entries():
+    return get_valid_list_entries() + get_valid_scalar_entries()
+
+## Metadata entries that are not allowed to be changed.
+def get_immutable_entries():
+    return list([
+            'authorId',
+            'authorUrl',
+            'seriesUrl',
+            'storyId',
+            'storyUrl',
+            'langcode',
+            'numChapters',
+            'site',
+            'anthology',
+            'newforanthology',
+            'cover_image',
+            ])
+
 # Moved here for test_config.
 def make_generate_cover_settings(param):
     vlist = []
@@ -706,6 +603,7 @@
         self.listTypeEntries = get_valid_list_entries()
 
         self.validEntries = get_valid_entries()
+        self.immutableEntries = get_immutable_entries()
 
         self.url_config_set = False
 
@@ -750,6 +648,12 @@
     def getValidMetaList(self):
         return self.validEntries + self.getConfigList("extra_valid_entries")
 
+    def isImmutableMetaEntry(self, key):
+        return key in self.getImmutableMetaList()
+
+    def getImmutableMetaList(self):
+        return self.immutableEntries
+
     # used by adapters & writers, non-convention naming style
     def hasConfig(self, key):
         return self.has_config(self.sectionslist, key)
@@ -1218,6 +1122,9 @@
     def isValidMetaEntry(self, key):
         return self.configuration.isValidMetaEntry(key)
 
+    def isImmutableMetaEntry(self, key):
+        return self.configuration.isImmutableMetaEntry(key)
+
     def getValidMetaList(self):
         return self.configuration.getValidMetaList()
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/defaults.ini 
new/FanFicFare-4.55.0/fanficfare/defaults.ini
--- old/FanFicFare-4.54.0/fanficfare/defaults.ini       2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/defaults.ini       2026-03-01 
16:25:11.000000000 +0100
@@ -1592,18 +1592,13 @@
 
 
 [adult-fanfiction.org]
+use_basic_cache:true
+
 extra_valid_entries:eroticatags,disclaimer
 eroticatags_label:Erotica Tags
 disclaimer_label:Disclaimer
 extra_titlepage_entries:eroticatags,disclaimer
 
-## Some sites require login (or login for some rated stories) The
-## program can prompt you, or you can save it in config.  In
-## commandline version, this should go in your personal.ini, not
-## defaults.ini.
-#username:YourName
-#password:yourpassword
-
 [althistory.com]
 ## Note this is NOT the same as www.alternatehistory.com
 ## see [base_xenforoforum]
@@ -4406,9 +4401,6 @@
 extracharacters:Buffy, Spike
 extraships:Spike/Buffy
 
-[www.swi.org.ru]
-use_basic_cache:true
-
 [www.the-sietch.com]
 ## see [base_xenforoforum]
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/epubutils.py 
new/FanFicFare-4.55.0/fanficfare/epubutils.py
--- old/FanFicFare-4.54.0/fanficfare/epubutils.py       2026-02-01 
16:04:34.000000000 +0100
+++ new/FanFicFare-4.55.0/fanficfare/epubutils.py       2026-03-01 
16:25:11.000000000 +0100
@@ -20,6 +20,8 @@
 from .six import string_types as basestring
 from io import BytesIO
 
+FONT_EXTS = ('ttf','otf','woff','woff2')
+
 # from io import StringIO
 # import cProfile, pstats
 # from pstats import SortKey
@@ -156,7 +158,11 @@
                     # (_u\d+)? is from calibre convert naming files
                     # 3/OEBPS/file0005_u3.xhtml etc.
                     if getsoups:
-                        soup = make_soup(epub.read(href).decode("utf-8"))
+                        try:
+                            soup = make_soup(epub.read(href).decode("utf-8"))
+                        except:
+                            logger.warning("Listed chapter file(%s) not found 
in epub, skipping."%href)
+                            continue
                         for img in soup.find_all('img'):
                             newsrc=''
                             longdesc=''
@@ -191,6 +197,9 @@
                                 for style_url in 
re.findall(r'url\([\'"]?(.*?)[\'"]?\)', style):
                                     if style_url.startswith('failedtoload'):
                                         continue
+                                    if style_url.rsplit('.')[-1].lower() in 
FONT_EXTS:
+                                        logger.debug("Skipping sheet style 
url(%s), assumed font"%style_url)
+                                        continue
                                     logger.debug("Updating inline/embedded 
style url(%s)"%style_url)
                                     newsrc=''
                                     longdesc=''
@@ -257,11 +266,18 @@
             ## update.  output_css is configured, but 'extra_css' like
             ## otw workskin might vary.
             if item.getAttribute("media-type") == "text/css" and getsoups:
-                style = epub.read(href).decode("utf-8")
+                try:
+                    style = epub.read(href).decode("utf-8")
+                except:
+                    logger.warning("Listed CSS file(%s) not found in epub, 
skipping."%href)
+                    continue
                 if 'url(' in style:
                     # logger.debug("%s CSS url:%s"%(href,style))
                     ## the pattern will also accept mismatched '/", which is 
broken CSS.
                     for style_url in re.findall(r'url\([\'"]?(.*?)[\'"]?\)', 
style):
+                        if style_url.rsplit('.')[-1].lower() in FONT_EXTS:
+                            logger.debug("Skipping sheet style url(%s), 
assumed font"%style_url)
+                            continue
                         logger.debug("Updating sheet style url(%s)"%style_url)
                         newsrc=''
                         longdesc=''
@@ -291,7 +307,11 @@
                 img_url = href.replace("OEBPS/","")
                 # logger.debug("-->img img:%s"%img_url)
                 if img_url not in images:
-                    data = epub.read(href)
+                    try:
+                        data = epub.read(href)
+                    except:
+                        logger.warning("Listed image file(%s) not found in 
epub, skipping."%href)
+                        continue
                     # logger.debug("-->img Add oldimages:%s"%href)
                     images[img_url] = (img_url, data)
     try:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/fanficfare/story.py 
new/FanFicFare-4.55.0/fanficfare/story.py
--- old/FanFicFare-4.54.0/fanficfare/story.py   2026-02-01 16:04:34.000000000 
+0100
+++ new/FanFicFare-4.55.0/fanficfare/story.py   2026-03-01 16:25:11.000000000 
+0100
@@ -80,8 +80,10 @@
     def convert_image(url,data,sizes,grayscale,
                       
removetrans,imgtype="jpg",background='#ffffff',jpg_quality=95):
         # logger.debug("calibre convert_image called")
-
-        if url.lower().endswith('.svg') or '.svg?' in url.lower():
+        ## I can just see somebody doing logo_svg.jpg
+        if url.lower().endswith('.svg') or '.svg?' in url.lower() \
+                or ensure_binary('<svg ') in data[:1000] \
+                or ensure_binary('xmlns="http://www.w3.org/2000/svg";') in 
data[:1000]:
             raise exceptions.RejectImage("Calibre image processing chokes on 
SVG images.")
         export = False
         img, format = image_and_format_from_data(data)
@@ -914,7 +916,7 @@
         if key == "language":
             try:
                 # getMetadata not just self.metadata[] to do replace_metadata.
-                self.setMetadata('langcode',langs[self.getMetadata(key)])
+                self.setMetadata('langcode',langs[self.getMetadataRaw(key)])
             except:
                 self.setMetadata('langcode','en')
 
@@ -1138,6 +1140,9 @@
                     removeallentities=False,
                     doreplacements=True,
                     seen_list={}):
+        if self.isImmutableMetaEntry(key):
+            doreplacements = False
+
         # check for a cached value to speed processing
         if 
self.metadata_cache.is_cached_scalar(key,removeallentities,doreplacements):
             return 
self.metadata_cache.get_cached_scalar(key,removeallentities,doreplacements)
@@ -1306,6 +1311,9 @@
         #print("getList(%s,%s)"%(listname,includelist))
         retlist = []
 
+        if self.isImmutableMetaEntry(listname):
+            doreplacements = False
+
         # check for a cached value to speed processing
         if not skip_cache and 
self.metadata_cache.is_cached_list(listname,removeallentities,doreplacements):
             return 
self.metadata_cache.get_cached_list(listname,removeallentities,doreplacements)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/FanFicFare-4.54.0/pyproject.toml 
new/FanFicFare-4.55.0/pyproject.toml
--- old/FanFicFare-4.54.0/pyproject.toml        2026-02-01 16:04:34.000000000 
+0100
+++ new/FanFicFare-4.55.0/pyproject.toml        2026-03-01 16:25:11.000000000 
+0100
@@ -16,7 +16,7 @@
 #
 # For a discussion on single-sourcing the version, see
 # https://packaging.python.org/guides/single-sourcing-package-version/
-version = "4.54.0"
+version = "4.55.0"
 
 # This is a one-line description or tagline of what your project does. This
 # corresponds to the "Summary" metadata field:

Reply via email to