Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-fanficfare for
openSUSE:Factory checked in at 2022-02-15 23:57:48
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
and /work/SRC/openSUSE:Factory/.python-fanficfare.new.1956 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-fanficfare"
Tue Feb 15 23:57:48 2022 rev:38 rq:955094 version:4.10.0
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes    2022-01-26 21:27:52.513623913 +0100
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.1956/python-fanficfare.changes    2022-02-15 23:58:16.088376180 +0100
@@ -1,0 +2,27 @@
+Tue Feb 15 18:30:59 UTC 2022 - Matej Cepl <[email protected]>
+
+- Upgrade to 4.10.0:
+ - adapter_fanfiktionde: Update where description comes from.
+ - ReadOnlyMindAdapter: Add series_tags feature to populate
+ series metadata (#803) - Thanks Nothorse
+ - Add use_flaresolverr_proxy:withimages option for FlareSolverr
+ v1 users.
+ - Correct use_flaresolverr_proxy error checking.
+ - adapter_royalroadcom: Add status 'Dropped'
+ - New Site: readonlymind.com, thanks Nothorse Issue #767 PR
+ #801
+ - Force include_images:false when use_flaresolverr_proxy:true
+ -- FlareSolverr v2.2.0 crashes on image request.
+ - Remove defunct site: hpfanficarchive.com
+ - base_efiction: Add 'Igen' as equiv to 'Yes, Completed' in
+ Hungarian
+ - adapter_royalroadcom: Add status 'Stub' Closes #800
+ - New site: merengo.hu (Hungarian), thanks estherflails
+ - Remove site: fanfic.hu (moved to merengo.hu, storyIds don't
+ appear to be the same)
+ - Fix for py2 for base_xenforoforum tagsfromtitle. Thanks hseg
+ for the help. See #791
+ - Extend base_xenforoforum tagsfromtitle for ')(' ']['
+ - Changes for upcoming Qt6 Calibre
+
+-------------------------------------------------------------------
Old:
----
FanFicFare-4.9.0.tar.gz
New:
----
FanFicFare-4.10.0.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.vpnaj2/_old 2022-02-15 23:58:16.536377417 +0100
+++ /var/tmp/diff_new_pack.vpnaj2/_new 2022-02-15 23:58:16.540377428 +0100
@@ -21,7 +21,7 @@
%define skip_python2 1
%{?!python_module:%define python_module() python-%{**} python3-%{**}}
Name: python-fanficfare
-Version: 4.9.0
+Version: 4.10.0
Release: 0
Summary: Tool for making eBooks from stories on fanfiction and other web sites
License: GPL-3.0-only
@@ -34,6 +34,7 @@
BuildRequires: %{python_module cloudscraper}
BuildRequires: %{python_module html2text}
BuildRequires: %{python_module html5lib}
+BuildRequires: %{python_module requests-file}
BuildRequires: %{python_module setuptools >= 17.1}
BuildRequires: dos2unix
BuildRequires: fdupes
@@ -43,6 +44,7 @@
Requires: python-cloudscraper
Requires: python-html2text
Requires: python-html5lib
+Requires: python-requests-file
Requires: python-setuptools
Requires(post): update-alternatives
Requires(postun):update-alternatives
++++++ FanFicFare-4.9.0.tar.gz -> FanFicFare-4.10.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/__init__.py
new/FanFicFare-4.10.0/calibre-plugin/__init__.py
--- old/FanFicFare-4.9.0/calibre-plugin/__init__.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/__init__.py 2022-02-14
16:39:42.000000000 +0100
@@ -33,7 +33,7 @@
from calibre.customize import InterfaceActionBase
# pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (4, 9, 0)
+__version__ = (4, 10, 0)
## Apparently the name for this class doesn't matter--it was still
## 'demo' for the first few versions.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/calibre-plugin/basicinihighlighter.py
new/FanFicFare-4.10.0/calibre-plugin/basicinihighlighter.py
--- old/FanFicFare-4.9.0/calibre-plugin/basicinihighlighter.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/basicinihighlighter.py 2022-02-14
16:39:42.000000000 +0100
@@ -9,10 +9,7 @@
import re
-try:
- from PyQt5.Qt import (Qt, QSyntaxHighlighter, QTextCharFormat, QBrush)
-except ImportError as e:
- from PyQt4.Qt import (Qt, QSyntaxHighlighter, QTextCharFormat, QBrush)
+from PyQt5.Qt import (Qt, QSyntaxHighlighter, QTextCharFormat, QBrush)
from fanficfare.six import string_types
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/common_utils.py
new/FanFicFare-4.10.0/calibre-plugin/common_utils.py
--- old/FanFicFare-4.9.0/calibre-plugin/common_utils.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/common_utils.py 2022-02-14
16:39:42.000000000 +0100
@@ -10,18 +10,10 @@
import os
from contextlib import contextmanager
-try:
- from PyQt5 import QtWidgets as QtGui
- from PyQt5.Qt import (QApplication, Qt, QIcon, QPixmap, QLabel, QDialog,
QHBoxLayout,
- QTableWidgetItem, QFont, QLineEdit, QComboBox,
- QVBoxLayout, QDialogButtonBox, QStyledItemDelegate,
QDateTime,
- QTextEdit, QListWidget, QAbstractItemView, QCursor)
-except ImportError as e:
- from PyQt4 import QtGui
- from PyQt4.Qt import (QApplication, Qt, QIcon, QPixmap, QLabel, QDialog,
QHBoxLayout,
- QTableWidgetItem, QFont, QLineEdit, QComboBox,
- QVBoxLayout, QDialogButtonBox, QStyledItemDelegate,
QDateTime,
- QTextEdit, QListWidget, QAbstractItemView, QCursor)
+from PyQt5.Qt import (QApplication, Qt, QIcon, QPixmap, QLabel, QDialog,
QHBoxLayout,
+ QTableWidgetItem, QFont, QLineEdit, QComboBox,
+ QVBoxLayout, QDialogButtonBox, QStyledItemDelegate,
QDateTime,
+ QTextEdit, QListWidget, QAbstractItemView, QCursor)
from calibre.constants import iswindows, DEBUG
from calibre.gui2 import UNDEFINED_QDATETIME, gprefs, info_dialog
@@ -266,7 +258,7 @@
def __init__(self, text):
if text is None:
text = ''
- QTableWidgetItem.__init__(self, text, QtGui.QTableWidgetItem.UserType)
+ QTableWidgetItem.__init__(self, text)
self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled|Qt.ItemIsEditable)
class ReadOnlyTableWidgetItem(QTableWidgetItem):
@@ -274,14 +266,14 @@
def __init__(self, text):
if text is None:
text = ''
- QTableWidgetItem.__init__(self, text, QtGui.QTableWidgetItem.UserType)
+ QTableWidgetItem.__init__(self, text)
self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
class RatingTableWidgetItem(QTableWidgetItem):
def __init__(self, rating, is_read_only=False):
- QTableWidgetItem.__init__(self, '', QtGui.QTableWidgetItem.UserType)
+ QTableWidgetItem.__init__(self, '')
self.setData(Qt.DisplayRole, rating)
if is_read_only:
self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
@@ -293,10 +285,10 @@
if date_read == UNDEFINED_DATE and default_to_today:
date_read = now()
if is_read_only:
- QTableWidgetItem.__init__(self, format_date(date_read, None),
QtGui.QTableWidgetItem.UserType)
+ QTableWidgetItem.__init__(self, format_date(date_read, None))
self.setFlags(Qt.ItemIsSelectable|Qt.ItemIsEnabled)
else:
- QTableWidgetItem.__init__(self, '',
QtGui.QTableWidgetItem.UserType)
+ QTableWidgetItem.__init__(self, '')
self.setData(Qt.DisplayRole, QDateTime(date_read))
@@ -507,7 +499,6 @@
self.keys_list.setAlternatingRowColors(True)
ml.addWidget(self.keys_list)
self.value_text = QTextEdit(self)
- self.value_text.setTabStopWidth(24)
self.value_text.setReadOnly(True)
ml.addWidget(self.value_text, 1)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/config.py
new/FanFicFare-4.10.0/calibre-plugin/config.py
--- old/FanFicFare-4.9.0/calibre-plugin/config.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/config.py 2022-02-14
16:39:42.000000000 +0100
@@ -15,33 +15,11 @@
import threading
from collections import OrderedDict
-try:
- from PyQt5 import QtWidgets as QtGui
- from PyQt5.Qt import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout,
QLabel,
- QLineEdit, QComboBox, QCheckBox, QPushButton,
QTabWidget,
- QScrollArea, QGroupBox, QButtonGroup, QRadioButton,
- Qt)
-except ImportError as e:
- from PyQt4 import QtGui
- from PyQt4.Qt import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout,
QLabel,
- QLineEdit, QComboBox, QCheckBox, QPushButton,
QTabWidget,
- QScrollArea, QGroupBox, QButtonGroup, QRadioButton,
- Qt)
-try:
- from calibre.gui2 import QVariant
- del QVariant
-except ImportError:
- is_qt4 = False
- convert_qvariant = lambda x: x
-else:
- is_qt4 = True
- def convert_qvariant(x):
- vt = x.type()
- if vt == x.String:
- return unicode(x.toString())
- if vt == x.List:
- return [convert_qvariant(i) for i in x.toList()]
- return x.toPyObject()
+from PyQt5 import QtWidgets as QtGui
+from PyQt5.Qt import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel,
+ QLineEdit, QComboBox, QCheckBox, QPushButton, QTabWidget,
+ QScrollArea, QGroupBox, QButtonGroup, QRadioButton,
+ Qt)
from calibre.gui2 import dynamic, info_dialog
from calibre.gui2.complete2 import EditWithComplete
@@ -357,7 +335,7 @@
prefs['gcnewonly'] = self.calibrecover_tab.gcnewonly.isChecked()
gc_site_settings = {}
for (site,combo) in
six.iteritems(self.calibrecover_tab.gc_dropdowns):
- val = unicode(convert_qvariant(combo.itemData(combo.currentIndex())))
+ val = unicode(combo.itemData(combo.currentIndex()))
if val != 'none':
gc_site_settings[site] = val
#print("gc_site_settings[%s]:%s"%(site,gc_site_settings[site]))
@@ -400,19 +378,19 @@
# Custom Columns tab
# error column
- prefs['errorcol'] = unicode(convert_qvariant(self.cust_columns_tab.errorcol.itemData(self.cust_columns_tab.errorcol.currentIndex())))
+ prefs['errorcol'] = unicode(self.cust_columns_tab.errorcol.itemData(self.cust_columns_tab.errorcol.currentIndex()))
prefs['save_all_errors'] = self.cust_columns_tab.save_all_errors.isChecked()
# metadata column
- prefs['savemetacol'] = unicode(convert_qvariant(self.cust_columns_tab.savemetacol.itemData(self.cust_columns_tab.savemetacol.currentIndex())))
+ prefs['savemetacol'] = unicode(self.cust_columns_tab.savemetacol.itemData(self.cust_columns_tab.savemetacol.currentIndex()))
# lastchecked column
- prefs['lastcheckedcol'] = unicode(convert_qvariant(self.cust_columns_tab.lastcheckedcol.itemData(self.cust_columns_tab.lastcheckedcol.currentIndex())))
+ prefs['lastcheckedcol'] = unicode(self.cust_columns_tab.lastcheckedcol.itemData(self.cust_columns_tab.lastcheckedcol.currentIndex()))
# cust cols tab
colsmap = {}
for (col,combo) in
six.iteritems(self.cust_columns_tab.custcol_dropdowns):
- val = unicode(convert_qvariant(combo.itemData(combo.currentIndex())))
+ val = unicode(combo.itemData(combo.currentIndex()))
if val != 'none':
colsmap[col] = val
#print("colsmap[%s]:%s"%(col,colsmap[col]))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/dialogs.py
new/FanFicFare-4.10.0/calibre-plugin/dialogs.py
--- old/FanFicFare-4.9.0/calibre-plugin/dialogs.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/dialogs.py 2022-02-14
16:39:42.000000000 +0100
@@ -16,38 +16,19 @@
from datetime import datetime
+from PyQt5 import QtWidgets as QtGui
+from PyQt5 import QtCore
+from PyQt5.Qt import (QApplication, QDialog, QWidget, QTableWidget,
QVBoxLayout, QHBoxLayout,
+ QGridLayout, QPushButton, QFont, QLabel, QCheckBox,
QIcon,
+ QLineEdit, QComboBox, QProgressDialog, QTimer,
QDialogButtonBox,
+ QScrollArea, QPixmap, Qt, QAbstractItemView, QTextEdit,
+ pyqtSignal, QGroupBox, QFrame)
try:
- from PyQt5 import QtWidgets as QtGui
- from PyQt5 import QtCore
- from PyQt5.Qt import (QApplication, QDialog, QWidget, QTableWidget,
QVBoxLayout, QHBoxLayout,
- QGridLayout, QPushButton, QFont, QLabel, QCheckBox,
QIcon,
- QLineEdit, QComboBox, QProgressDialog, QTimer,
QDialogButtonBox,
- QScrollArea, QPixmap, Qt, QAbstractItemView,
QTextEdit,
- pyqtSignal, QGroupBox, QFrame)
-except ImportError as e:
- from PyQt4 import QtGui
- from PyQt4 import QtCore
- from PyQt4.Qt import (QApplication, QDialog, QWidget, QTableWidget,
QVBoxLayout, QHBoxLayout,
- QGridLayout, QPushButton, QFont, QLabel, QCheckBox,
QIcon,
- QLineEdit, QComboBox, QProgressDialog, QTimer,
QDialogButtonBox,
- QScrollArea, QPixmap, Qt, QAbstractItemView,
QTextEdit,
- pyqtSignal, QGroupBox, QFrame)
-
-try:
- from calibre.gui2 import QVariant
- del QVariant
-except ImportError:
- is_qt4 = False
- convert_qvariant = lambda x: x
-else:
- is_qt4 = True
- def convert_qvariant(x):
- vt = x.type()
- if vt == x.String:
- return unicode(x.toString())
- if vt == x.List:
- return [convert_qvariant(i) for i in x.toList()]
- return x.toPyObject()
+ # qt6 Calibre v6+
+ QTextEditNoWrap = QTextEdit.LineWrapMode.NoWrap
+except:
+ # qt5 Calibre v2-5
+ QTextEditNoWrap = QTextEdit.NoWrap
from calibre.gui2 import gprefs
show_download_options = 'fff:add new/update dialogs:show_download_options'
@@ -249,7 +230,7 @@
self.url = DroppableQTextEdit(self)
self.url.setToolTip("UrlTooltip")
- self.url.setLineWrapMode(QTextEdit.NoWrap)
+ self.url.setLineWrapMode(QTextEditNoWrap)
self.l.addWidget(self.url)
self.groupbox = QGroupBox(_("Show Download Options"))
@@ -1028,7 +1009,7 @@
books = []
#print("=========================\nbooks:%s"%self.books)
for row in range(self.rowCount()):
- rnum = convert_qvariant(self.item(row, 1).data(Qt.UserRole))
+ rnum = self.item(row, 1).data(Qt.UserRole)
book = self.books[rnum]
books.append(book)
return books
@@ -1216,7 +1197,7 @@
rejectrows = []
for row in range(self.rejects_table.rowCount()):
url = unicode(self.rejects_table.item(row, 0).text()).strip()
- book_id =convert_qvariant(self.rejects_table.item(row, 0).data(Qt.UserRole))
+ book_id =self.rejects_table.item(row, 0).data(Qt.UserRole)
title = unicode(self.rejects_table.item(row, 1).text()).strip()
auth = unicode(self.rejects_table.item(row, 2).text()).strip()
note = unicode(self.rejects_table.cellWidget(row, 3).currentText()).strip()
@@ -1226,7 +1207,7 @@
def get_reject_list_ids(self):
rejectrows = []
for row in range(self.rejects_table.rowCount()):
- book_id = convert_qvariant(self.rejects_table.item(row, 0).data(Qt.UserRole))
+ book_id = self.rejects_table.item(row, 0).data(Qt.UserRole)
if book_id:
rejectrows.append(book_id)
return rejectrows
@@ -1261,7 +1242,7 @@
self.l.addWidget(self.label)
self.textedit = QTextEdit(self)
- self.textedit.setLineWrapMode(QTextEdit.NoWrap)
+ self.textedit.setLineWrapMode(QTextEditNoWrap)
self.textedit.setReadOnly(read_only)
self.textedit.setText(text)
self.l.addWidget(self.textedit)
@@ -1334,7 +1315,7 @@
entry_keywords=get_valid_entry_keywords(),
)
- self.textedit.setLineWrapMode(QTextEdit.NoWrap)
+ self.textedit.setLineWrapMode(QTextEditNoWrap)
try:
self.textedit.setFont(QFont("Courier",
parent.font().pointSize()+1))
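The QTextEditNoWrap try/except added above is the Qt5/Qt6 compatibility pattern behind the "Changes for upcoming Qt6 Calibre" entry: Qt6 only exposes enum members through their scoped enum class, while older Qt5 code accessed them directly on the widget class. The same idea as a standalone sketch (illustrative; assumes PyQt6 or PyQt5 is importable outside Calibre):

    # illustrative sketch of the enum-lookup pattern used in dialogs.py
    try:
        from PyQt6.QtWidgets import QTextEdit   # Qt6 binding
    except ImportError:
        from PyQt5.QtWidgets import QTextEdit   # Qt5 binding

    try:
        # Qt6 / Calibre v6+ (newer PyQt5 accepts this form as well)
        QTextEditNoWrap = QTextEdit.LineWrapMode.NoWrap
    except AttributeError:
        # older Qt5 bindings: the member sits directly on the widget class
        QTextEditNoWrap = QTextEdit.NoWrap

    # a widget can then call setLineWrapMode(QTextEditNoWrap) under either binding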
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/fff_plugin.py
new/FanFicFare-4.10.0/calibre-plugin/fff_plugin.py
--- old/FanFicFare-4.9.0/calibre-plugin/fff_plugin.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/fff_plugin.py 2022-02-14
16:39:42.000000000 +0100
@@ -40,10 +40,7 @@
import traceback
from collections import defaultdict
-try:
- from PyQt5.Qt import (QApplication, QMenu, QTimer, QToolButton)
-except ImportError as e:
- from PyQt4.Qt import (QApplication, QMenu, QTimer, QToolButton)
+from PyQt5.Qt import (QApplication, QMenu, QTimer, QToolButton)
from calibre.constants import numeric_version as calibre_version
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/inihighlighter.py
new/FanFicFare-4.10.0/calibre-plugin/inihighlighter.py
--- old/FanFicFare-4.9.0/calibre-plugin/inihighlighter.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/inihighlighter.py 2022-02-14
16:39:42.000000000 +0100
@@ -12,10 +12,17 @@
import logging
logger = logging.getLogger(__name__)
+from PyQt5.Qt import (QApplication, Qt, QColor, QSyntaxHighlighter,
+ QTextCharFormat, QBrush, QFont)
+
try:
- from PyQt5.Qt import (QApplication, Qt, QColor, QSyntaxHighlighter,
QTextCharFormat, QBrush, QFont)
-except ImportError as e:
- from PyQt4.Qt import (QApplication, Qt, QColor, QSyntaxHighlighter,
QTextCharFormat, QBrush, QFont)
+ # qt6 Calibre v6+
+ QFontNormal = QFont.Weight.Normal
+ QFontBold = QFont.Weight.Bold
+except:
+ # qt5 Calibre v2-5
+ QFontNormal = QFont.Normal
+ QFontBold = QFont.Bold
from fanficfare.six import string_types
@@ -83,13 +90,13 @@
self.highlightingRules.append( HighlightingRule(
r"^(add_to_)?"+rekeywords+r"(_filelist)?\s*[:=]", colors['knownkeywords'] ) )
# *all* sections -- change known later.
- self.highlightingRules.append( HighlightingRule( r"^\[[^\]]+\].*?$", colors['errors'], QFont.Bold, blocknum=1 ) )
+ self.highlightingRules.append( HighlightingRule( r"^\[[^\]]+\].*?$", colors['errors'], QFontBold, blocknum=1 ) )
if sections:
# *known* sections
resections = r'('+(r'|'.join(sections))+r')'
resections = resections.replace('.','\.') #escape dots.
- self.highlightingRules.append( HighlightingRule( r"^\["+resections+r"\]\s*$", colors['knownsections'], QFont.Bold, blocknum=2 ) )
+ self.highlightingRules.append( HighlightingRule( r"^\["+resections+r"\]\s*$", colors['knownsections'], QFontBold, blocknum=2 ) )
# test story sections
self.teststoryRule = HighlightingRule(
r"^\[teststory:([0-9]+|defaults)\]", colors['teststories'], blocknum=3 )
@@ -135,7 +142,7 @@
class HighlightingRule():
def __init__( self, pattern, color,
- weight=QFont.Normal,
+ weight=QFontNormal,
style=Qt.SolidPattern,
blocknum=0):
if isinstance(pattern, string_types):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/calibre-plugin/plugin-defaults.ini
new/FanFicFare-4.10.0/calibre-plugin/plugin-defaults.ini
--- old/FanFicFare-4.9.0/calibre-plugin/plugin-defaults.ini 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/calibre-plugin/plugin-defaults.ini 2022-02-14
16:39:42.000000000 +0100
@@ -570,6 +570,30 @@
## used. If set to -1, all cached files will be used.
browser_cache_age_limit:4.0
+## As a (second) work around for certain sites blocking automated
+## downloads, FFF offers the ability to request pages through nsapa's
+## fanfictionnet_ff_proxy and FlareSolverr proxy servers. See
+## https://github.com/JimmXinu/FanFicFare/wiki/ProxyFeatures for more
+## details.
+
+## FlareSolverr (https://github.com/FlareSolverr/FlareSolverr) is a
+## generic proxy that works with several otherwise blocked sites.
+## It's recommended to only set use_flaresolverr_proxy:true for
+## specific sites.
+## FlareSolverr v1 doesn't work with some sites anymore (including
+## ffnet), but FlareSolverr v2+ cannot download images.
+## use_flaresolverr_proxy:true assumes FSv2 and automatically sets
+## include_images:false
+## If you want to use FSv1 with images, you can set
+## use_flaresolverr_proxy:withimages
+
+#[www.fanfiction.net]
+#use_flaresolverr_proxy:true
+## option settings, these are the defaults:
+#flaresolverr_proxy_address:localhost
+#flaresolverr_proxy_port:8191
+#flaresolverr_proxy_protocol:http
+
## Because some adapters can pull chapter URLs from human posts, the
## odds of errors in the chapter URLs can be higher for some
## sites/stories. You can set continue_on_chapter_error:true to
@@ -695,10 +719,14 @@
# previous version's include_metadata_pre Can't do on tagsfromtitle
# because that's applied to each part after split.
tagsfromtitledetect=>^[^\]\)]+$=>
+# change ][ and )( to , for [AU][Othertag] etc
+ tagsfromtitle=>\] *\[=>,
+ tagsfromtitle=>\) *\(=>,
# for QuestionableQuesting NSFW subforum.
- tagsfromtitle=>^\[NSFW\].*?(\[([^\]]+)\]|\(([^\)]+)\)).*?$=>NSFW\,\2\,\3
-# remove anything outside () or []
- tagsfromtitle=>^.*?(\[([^\]]+)\]|\(([^\)]+)\)).*?$=>\2\,\3
+ tagsfromtitle=>^\[NSFW\].*?((?P<br>\[)|(?P<pr>\())(?P<tag>(?(br)[^\]]|(?(pr)[^\)]))+)(?(br)\]|(?(pr)\))).*?$=>NSFW\,\g<tag>
+# remove anything outside () or []. Note \, at the end used to
+# prevent looping back so '[Worm(AU)]' becomes 'Worm(AU)' not just 'AU'
+ tagsfromtitle=>^.*?((?P<br>\[)|(?P<pr>\())(?P<tag>(?(br)[^\]]|(?(pr)[^\)]))+)(?(br)\]|(?(pr)\))).*?$=>\g<tag>\,
# remove () []
# tagsfromtitle=>[\(\)\[\]]=>
# shield these html entities from the ';' pattern below
@@ -1702,19 +1730,6 @@
website_encodings:Windows-1252,utf8
-[fanfic.hu]
-## website encoding(s) In theory, each website reports the character
-## encoding they use for each page. In practice, some sites report it
-## incorrectly. Each adapter has a default list, usually "utf8,
-## Windows-1252" or "Windows-1252, utf8", but this will let you
-## explicitly set the encoding and order if you need to. The special
-## value 'auto' will call chardet and use the encoding it reports if
-## it has +90% confidence. 'auto' is not reliable.
-website_encodings:ISO-8859-1,auto
-
-## Site dedicated to these categories/characters/ships
-extracategories:Harry Potter
-
[fanfic.potterheadsanonymous.com]
## Some sites do not require a login, but do require the user to
## confirm they are adult for adult content. In commandline version,
@@ -2293,6 +2308,30 @@
website_encodings: utf8:ignore, Windows-1252, iso-8859-1
+[readonlymind.com]
+## Some sites do not require a login, but do require the user to
+## confirm they are adult for adult content. In commandline version,
+## this should go in your personal.ini, not defaults.ini.
+## Login on readonlymind.com is optional and not used for adultcheck
+#is_adult:true
+
+## Clear FanFiction from defaults, site is original fiction.
+extratags:Erotica
+
+extra_valid_entries:eroticatags
+eroticatags_label:Erotica Tags
+extra_titlepage_entries: eroticatags
+
+## some tags are used as series identification. There is no way to find a
+## sequence, but at least the stories will be grouped. Use the tag as written
+## without the hash mark. Keep underscores '_' in, they will be replaced by
+## spaces in the metadata.
+#series_tags:Human_Domestication_Guide
+
+## If you want underscores replaced in the tags:
+#add_to_replace_metadata:
+# eroticatags=>_=>\s
+
[samandjack.net]
## Some sites require login (or login for some rated stories) The
## program can prompt you, or you can save it in config. In
@@ -3257,12 +3296,6 @@
# numWords=~.*
# size=~.*
-[www.hpfanficarchive.com]
-## Site dedicated to these categories/characters/ships
-extracategories:Harry Potter
-
-website_encodings:Windows-1252,utf8
-
[www.ik-eternal.net]
## Some sites require login (or login for some rated stories) The
## program can prompt you, or you can save it in config. In
@@ -3811,4 +3844,3 @@
## Clear FanFiction from defaults, site is original fiction.
extratags:
-
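The new comments above spell out the trade-off: plain use_flaresolverr_proxy:true assumes FlareSolverr v2+ and therefore forces include_images:false, while FlareSolverr v1 users can keep images with the new 'withimages' value. A hedged personal.ini example using only the option names and documented defaults from this hunk:

    ## example personal.ini snippet (FlareSolverr v1 only -- v2+ crashes on
    ## image requests, so use_flaresolverr_proxy:true is the safe choice there)
    [www.fanfiction.net]
    use_flaresolverr_proxy:withimages
    flaresolverr_proxy_address:localhost
    flaresolverr_proxy_port:8191
    flaresolverr_proxy_protocol:http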
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/adapters/__init__.py
new/FanFicFare-4.10.0/fanficfare/adapters/__init__.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/__init__.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/__init__.py 2022-02-14
16:39:42.000000000 +0100
@@ -70,7 +70,6 @@
from . import adapter_pretendercentrecom
from . import adapter_darksolaceorg
from . import adapter_finestoriescom
-from . import adapter_hpfanficarchivecom
from . import adapter_hlfictionnet
from . import adapter_dracoandginnycom
from . import adapter_scarvesandcoffeenet
@@ -88,7 +87,6 @@
from . import adapter_voracity2eficcom
from . import adapter_spikeluvercom
from . import adapter_bloodshedversecom
-from . import adapter_fanfichu
from . import adapter_fictionmaniatv
from . import adapter_themaplebookshelf
from . import adapter_sheppardweircom
@@ -165,6 +163,8 @@
from . import adapter_worldofxde
from . import adapter_psychficcom
from . import adapter_deviantartcom
+from . import adapter_merengohu
+from . import adapter_readonlymindcom
## This bit of complexity allows adapters to be added by just adding
## importing. It eliminates the long if/else clauses we used to need
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfichu.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfichu.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfichu.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfichu.py
1970-01-01 01:00:00.000000000 +0100
@@ -1,187 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014 Fanficdownloader team, 2018 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import absolute_import
-import re
-# py2 vs py3 transition
-from ..six import ensure_text
-from ..six.moves.urllib import parse as urlparse
-
-from .base_adapter import BaseSiteAdapter, makeDate
-from .. import exceptions
-
-
-_SOURCE_CODE_ENCODING = 'utf-8'
-
-
-def getClass():
- return FanficHuAdapter
-
-
-def _get_query_data(url):
- components = urlparse.urlparse(url)
- query_data = urlparse.parse_qs(components.query)
- return dict((key, data[0]) for key, data in query_data.items())
-
-
-class FanficHuAdapter(BaseSiteAdapter):
- SITE_ABBREVIATION = 'ffh'
- SITE_DOMAIN = 'fanfic.hu'
- SITE_LANGUAGE = 'Hungarian'
-
- BASE_URL = 'https://' + SITE_DOMAIN + '/merengo/'
- VIEW_STORY_URL_TEMPLATE = BASE_URL + 'viewstory.php?sid=%s'
-
- DATE_FORMAT = '%m/%d/%Y'
-
- def __init__(self, config, url):
- BaseSiteAdapter.__init__(self, config, url)
-
- query_data = urlparse.parse_qs(self.parsedUrl.query)
- story_id = query_data['sid'][0]
-
- self.story.setMetadata('storyId', story_id)
- self._setURL(self.VIEW_STORY_URL_TEMPLATE % story_id)
- self.story.setMetadata('siteabbrev', self.SITE_ABBREVIATION)
- self.story.setMetadata('language', self.SITE_LANGUAGE)
-
- @staticmethod
- def getSiteDomain():
- return FanficHuAdapter.SITE_DOMAIN
-
- @classmethod
- def getSiteExampleURLs(cls):
- return cls.VIEW_STORY_URL_TEMPLATE % 1234
-
- def getSiteURLPattern(self):
- return
re.escape(self.VIEW_STORY_URL_TEMPLATE[:-2]).replace('https','https?') + r'\d+$'
-
- def extractChapterUrlsAndMetadata(self):
- soup = self.make_soup(self.get_request(self.url + '&i=1'))
-
- if ensure_text(soup.title.string).strip(u' :') == u'Írta':
- raise exceptions.StoryDoesNotExist(self.url)
-
- chapter_options = soup.find('form',
action='viewstory.php').select('option')
- # Remove redundant "Fejezetek" option
- chapter_options.pop(0)
-
- # If there is still more than one entry remove chapter overview entry
- if len(chapter_options) > 1:
- chapter_options.pop(0)
-
- for option in chapter_options:
- url = urlparse.urljoin(self.url, option['value'])
- self.add_chapter(option.string, url)
-
- author_url = urlparse.urljoin(self.BASE_URL, soup.find('a',
href=lambda href: href and href.startswith('viewuser.php?uid='))['href'])
- soup = self.make_soup(self.get_request(author_url))
-
- story_id = self.story.getMetadata('storyId')
- for table in soup('table', {'class': 'mainnav'}):
- title_anchor = table.find('span', {'class': 'storytitle'}).a
- href = title_anchor['href']
- if href.startswith('javascript:'):
- href = href.rsplit(' ', 1)[1].strip("'")
- query_data = _get_query_data(href)
-
- if query_data['sid'] == story_id:
- break
- else:
- # This should never happen, the story must be found on the author's
- # page.
- raise exceptions.FailedToDownload(self.url)
-
- self.story.setMetadata('title', title_anchor.string)
-
- rows = table('tr')
-
- anchors = rows[0].div('a')
- author_anchor = anchors[1]
- query_data = _get_query_data(author_anchor['href'])
- self.story.setMetadata('author', author_anchor.string)
- self.story.setMetadata('authorId', query_data['uid'])
- self.story.setMetadata('authorUrl', urlparse.urljoin(self.BASE_URL,
author_anchor['href']))
- self.story.setMetadata('reviews', anchors[3].string)
-
- if self.getConfig('keep_summary_html'):
- self.story.setMetadata('description',
self.utf8FromSoup(author_url, rows[1].td))
- else:
- self.story.setMetadata('description',
''.join(rows[1].td(text=True)))
-
- for row in rows[3:]:
- index = 0
- cells = row('td')
-
- while index < len(cells):
- cell = cells[index]
- key = ensure_text(cell.b.string).strip(u':')
- try:
- value = ensure_text(cells[index+1].string)
- except:
- value = None
-
- if key == u'Kategória':
- for anchor in cells[index+1]('a'):
- self.story.addToList('category', anchor.string)
-
- elif key == u'Szereplők':
- if cells[index+1].string:
- for name in cells[index+1].string.split(', '):
- self.story.addToList('character', name)
-
- elif key == u'Korhatár':
- if value != 'nem korhatáros':
- self.story.setMetadata('rating', value)
-
- elif key == u'Figyelmeztetések':
- for b_tag in cells[index+1]('b'):
- self.story.addToList('warnings', b_tag.string)
-
- elif key == u'Jellemzők':
- for genre in cells[index+1].string.split(', '):
- self.story.addToList('genre', genre)
-
- elif key == u'Fejezetek':
- self.story.setMetadata('numChapters', int(value))
-
- elif key == u'Megjelenés':
- self.story.setMetadata('datePublished', makeDate(value,
self.DATE_FORMAT))
-
- elif key == u'Frissítés':
- self.story.setMetadata('dateUpdated', makeDate(value,
self.DATE_FORMAT))
-
- elif key == u'Szavak':
- self.story.setMetadata('numWords', value)
-
- elif key == u'Befejezett':
- self.story.setMetadata('status', 'Completed' if value ==
'Nem' else 'In-Progress')
-
- index += 2
-
- if self.story.getMetadata('rating') == '18':
- if not (self.is_adult or self.getConfig('is_adult')):
- raise exceptions.AdultCheckRequired(self.url)
-
- def getChapterText(self, url):
- soup = self.make_soup(self.get_request(url))
- story_cell = soup.find('form', action='viewstory.php').parent.parent
-
- for div in story_cell('div'):
- div.extract()
-
- return self.utf8FromSoup(url, story_cell)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfictionnet.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfictionnet.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfictionnet.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfictionnet.py
2022-02-14 16:39:42.000000000 +0100
@@ -306,7 +306,7 @@
logger.debug("cover_url:%s"%cover_url)
authimg_url = ""
- if cover_url and self.getConfig('skip_author_cover'):
+ if cover_url and self.getConfig('skip_author_cover') and self.getConfig('include_images'):
try:
authsoup =
self.make_soup(self.get_request(self.story.getMetadata('authorUrl')))
try:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfiktionde.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfiktionde.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_fanfiktionde.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_fanfiktionde.py
2022-02-14 16:39:42.000000000 +0100
@@ -119,7 +119,7 @@
raise exceptions.FailedToDownload(self.getSiteDomain() +" says:
Auserhalb der Zeit von 23:00 Uhr bis 04:00 Uhr ist diese Geschichte nur nach
einer erfolgreichen Altersverifikation zuganglich.")
soup = self.make_soup(data)
- # print data
+ # logger.debug(data)
## Title
@@ -172,10 +172,12 @@
else:
self.story.setMetadata('status', 'In-Progress')
- ## Get description from own URL:
- ## /?a=v&storyid=46ccbef30000616306614050&s=1
- descsoup = self.make_soup(self.get_request("https://"+self.getSiteDomain()+"/?a=v&storyid="+self.story.getMetadata('storyId')+"&s=1"))
- self.setDescription(url,stripHTML(descsoup))
+ ## Get description
+ descdiv = soup.select_one('div#story-summary-inline div')
+ if descdiv:
+ if 'center' in descdiv['class']:
+ del descdiv['class']
+ self.setDescription(url,descdiv)
# #find metadata on the author's page
# asoup =
self.make_soup(self.get_request("https://"+self.getSiteDomain()+"?a=q&a1=v&t=nickdetailsstories&lbi=stories&ar=0&nick="+self.story.getMetadata('authorId')))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_hpfanficarchivecom.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_hpfanficarchivecom.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_hpfanficarchivecom.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_hpfanficarchivecom.py
1970-01-01 01:00:00.000000000 +0100
@@ -1,215 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2012 Fanficdownloader team, 2018 FanFicFare team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Software: eFiction
-from __future__ import absolute_import
-import logging
-logger = logging.getLogger(__name__)
-import re
-from bs4.element import Comment
-from ..htmlcleanup import stripHTML
-from .. import exceptions as exceptions
-
-# py2 vs py3 transition
-from ..six import text_type as unicode
-
-from .base_adapter import BaseSiteAdapter, makeDate
-
-def getClass():
- return HPFanficArchiveComAdapter
-
-# Class name has to be unique. Our convention is camel case the
-# sitename with Adapter at the end. www is skipped.
-class HPFanficArchiveComAdapter(BaseSiteAdapter):
-
- def __init__(self, config, url):
- BaseSiteAdapter.__init__(self, config, url)
-
- self.username = "NoneGiven" # if left empty, site doesn't return any
message at all.
- self.password = ""
- self.is_adult=False
-
- # get storyId from url--url validation guarantees query is only
sid=1234
- self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
-
- # normalized story URL.
- self._setURL( self.getProtocol() + self.getSiteDomain() +
'/stories/viewstory.php?sid='+self.story.getMetadata('storyId'))
-
- # Each adapter needs to have a unique site abbreviation.
- self.story.setMetadata('siteabbrev','hpffa')
-
- # The date format will vary from site to site.
- #
http://docs.python.org/library/datetime.html#strftime-strptime-behavior
- self.dateformat = "%B %d, %Y"
-
- @staticmethod # must be @staticmethod, don't remove it.
- def getSiteDomain():
- # The site domain. Does have www here, if it uses it.
- return 'hpfanficarchive.com'
-
- @classmethod
- def getProtocol(cls):
- # has changed from http to https to http again.
- return "http://"
-
- @classmethod
- def getSiteExampleURLs(cls):
- return
cls.getProtocol()+cls.getSiteDomain()+"/stories/viewstory.php?sid=1234"
-
- def getSiteURLPattern(self):
- return
r"https?:"+re.escape("//"+self.getSiteDomain()+"/stories/viewstory.php?sid=")+r"\d+$"
-
- ## Getting the chapter list and the meta data, plus 'is adult' checking.
- def extractChapterUrlsAndMetadata(self):
-
- # index=1 makes sure we see the story chapter index. Some
- # sites skip that for one-chapter stories.
- url = self.url
- logger.debug("URL: "+url)
-
- data = self.get_request(url)
-
- if "Access denied. This story has not been validated by the
adminstrators of this site." in data:
- raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access
denied. This story has not been validated by the adminstrators of this site.")
- elif "That story either does not exist on this archive or has not been
validated by the adminstrators of this site." in data:
- raise exceptions.AccessDenied(self.getSiteDomain() +" says: That
story either does not exist on this archive or has not been validated by the
adminstrators of this site.")
-
- soup = self.make_soup(data)
- # print data
-
-
- ## Title
- a = soup.find('a',
href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
- self.story.setMetadata('title',stripHTML(a))
-
- # Find authorid and URL from... author url.
- a = soup.find('div', id="mainpage").find('a',
href=re.compile(r"viewuser.php\?uid=\d+"))
- self.story.setMetadata('authorId',a['href'].split('=')[1])
-
self.story.setMetadata('authorUrl',self.getProtocol()+self.host+'/stories/'+a['href'])
- self.story.setMetadata('author',a.string)
-
- # Find the chapters:
- for chapter in soup.findAll('a',
href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+r"&chapter=\d+$")):
- # just in case there's tags, like <i> in chapter titles.
-
self.add_chapter(chapter,self.getProtocol()+self.host+'/stories/'+chapter['href'])
-
-
- # eFiction sites don't help us out a lot with their meta data
- # formating, so it's a little ugly.
-
- # utility method
- def defaultGetattr(d,k):
- try:
- return d[k]
- except:
- return ""
-
- # <span class="label">Rated:</span> NC-17<br /> etc
- labels = soup.findAll('span',{'class':'label'})
- for labelspan in labels:
- val = labelspan.nextSibling
- value = unicode('')
- while val and not 'label' in defaultGetattr(val,'class'):
- # print("val:%s"%val)
- if not isinstance(val,Comment):
- value += unicode(val)
- val = val.nextSibling
- label = labelspan.string
- # print("label:%s\nvalue:%s"%(label,value))
-
- if 'Summary' in label:
- self.setDescription(url,value)
-
- if 'Rated' in label:
- self.story.setMetadata('rating', stripHTML(value))
-
- if 'Word count' in label:
- self.story.setMetadata('numWords', stripHTML(value))
-
- if 'Categories' in label:
- cats =
labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
- for cat in cats:
- self.story.addToList('category',cat.string)
-
- if 'Characters' in label:
- chars =
labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
- for char in chars:
- self.story.addToList('characters',char.string)
-
- if 'Genre' in label:
- genres =
labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1'))
# XXX
- for genre in genres:
- self.story.addToList('genre',genre.string)
-
- if 'Pairing' in label:
- ships =
labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=4'))
- for ship in ships:
- self.story.addToList('ships',ship.string)
-
- if 'Warnings' in label:
- warnings =
labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2'))
# XXX
- for warning in warnings:
- self.story.addToList('warnings',warning.string)
-
- if 'Completed' in label:
- if 'Yes' in stripHTML(value):
- self.story.setMetadata('status', 'Completed')
- else:
- self.story.setMetadata('status', 'In-Progress')
-
- if 'Published' in label:
- self.story.setMetadata('datePublished',
makeDate(stripHTML(value), self.dateformat))
-
- if 'Updated' in label:
- self.story.setMetadata('dateUpdated',
makeDate(stripHTML(value), self.dateformat))
-
- try:
- # Find Series name from series URL.
- a = soup.find('a',
href=re.compile(r"viewseries.php\?seriesid=\d+"))
- series_name = a.string
- series_url = self.getProtocol()+self.host+'/stories/'+a['href']
-
- seriessoup = self.make_soup(self.get_request(series_url))
- # can't use ^viewstory...$ in case of higher rated stories with
javascript href.
- storyas = seriessoup.findAll('a',
href=re.compile(r'viewstory.php\?sid=\d+'))
- i=1
- for a in storyas:
- # skip 'report this' and 'TOC' links
- if 'contact.php' not in a['href'] and 'index' not in a['href']:
- if a['href'] ==
('viewstory.php?sid='+self.story.getMetadata('storyId')):
- self.setSeries(series_name, i)
- self.story.setMetadata('seriesUrl',series_url)
- break
- i+=1
-
- except:
- # I find it hard to care if the series parsing fails
- pass
-
- # grab the text for an individual chapter.
- def getChapterText(self, url):
-
- logger.debug('Getting chapter text from: %s' % url)
-
- soup = self.make_soup(self.get_request(url))
-
- div = soup.find('div', {'id' : 'story'})
-
- if None == div:
- raise exceptions.FailedToDownload("Error downloading Chapter: %s!
Missing required element!" % url)
-
- return self.utf8FromSoup(url,div)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_merengohu.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_merengohu.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_merengohu.py
1970-01-01 01:00:00.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_merengohu.py
2022-02-14 16:39:42.000000000 +0100
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 FanFicFare team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Software: eFiction
+from __future__ import absolute_import
+from .base_efiction_adapter import BaseEfictionAdapter
+
+class MerengoHuAdapter(BaseEfictionAdapter):
+
+ @classmethod
+ def getProtocol(self):
+ return "https"
+
+ @staticmethod
+ def getSiteDomain():
+ return 'merengo.hu'
+
+ @classmethod
+ def getSiteAbbrev(self):
+ return 'merengo'
+
+ @classmethod
+ def getDateFormat(self):
+ return "%Y.%m.%d"
+
+ def extractChapterUrlsAndMetadata(self):
+ ## merengo.hu has a custom 18+ consent click-through
+ self.get_request(self.getUrlForPhp('tizennyolc.php')+'?consent=true')
+
+ ## Call super of extractChapterUrlsAndMetadata().
+ ## base_efiction leaves the soup in self.html.
+ return super(MerengoHuAdapter, self).extractChapterUrlsAndMetadata()
+
+def getClass():
+ return MerengoHuAdapter
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_readonlymindcom.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_readonlymindcom.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_readonlymindcom.py
1970-01-01 01:00:00.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_readonlymindcom.py
2022-02-14 16:39:42.000000000 +0100
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 FanFicFare team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+####################################################################################################
+### Based on MCStoriesComSiteAdapter and reworked by Nothorse
+###
+####################################################################################################
+from __future__ import absolute_import
+from __future__ import unicode_literals
+import logging
+logger = logging.getLogger(__name__)
+import re
+
+from ..htmlcleanup import stripHTML
+from .. import exceptions as exceptions
+
+# py2 vs py3 transition
+from ..six import text_type as unicode
+
+from .base_adapter import BaseSiteAdapter, makeDate
+
+####################################################################################################
+def getClass():
+ return ReadOnlyMindComAdapter
+
+# Class name has to be unique. Our convention is camel case the
+# sitename with Adapter at the end. www is skipped.
+class ReadOnlyMindComAdapter(BaseSiteAdapter):
+
+ def __init__(self, config, url):
+ BaseSiteAdapter.__init__(self, config, url)
+
+ # Each adapter needs to have a unique site abbreviation.
+ self.story.setMetadata('siteabbrev','rom')
+
+ self.username = "NoneGiven" # if left empty, site doesn't return any
message at all.
+ self.password = ""
+ self.is_adult=False
+
+ # Normalize story URL to the chapter index page (.../index.html)
+ m = re.match(self.getSiteURLPattern(),url)
+ if m:
+ # normalized story URL.
+ self._setURL("https://"+self.getSiteDomain()+"/@"+m.group('aut')+"/"+m.group('id')+"/")
+ else:
+ raise exceptions.InvalidStoryURL(url,
+ self.getSiteDomain(),
+ self.getSiteExampleURLs())
+
+ # get storyId from url
+ self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[2])
+
+
+ # The date format will vary from site to site.
+ #
http://docs.python.org/library/datetime.html#strftime-strptime-behavior
+ self.dateformat = "%Y-%m-%d"
+
+
+
################################################################################################
+ @staticmethod # must be @staticmethod, don't remove it.
+ def getSiteDomain():
+ return 'readonlymind.com'
+
+
################################################################################################
+ @classmethod
+ def getAcceptDomains(cls):
+
+ return ['readonlymind.com', 'www.readonlymind.com']
+
+
################################################################################################
+ @classmethod
+ def getSiteExampleURLs(self):
+ return "https://readonlymind.com/@AnAuthor/A_Story_Name/"
+
+
################################################################################################
+ def getSiteURLPattern(self):
+ return r'https?://readonlymind\.com/@(?P<aut>[a-zA-Z0-9_]+)/(?P<id>[a-zA-Z0-9_]+)'
+
+
################################################################################################
+ def extractChapterUrlsAndMetadata(self):
+ """
+ Chapters are located at /@author/StoryName/#/
+
+ The story metadata page is at /@author/StoryName/, including a list
+ of chapters.
+ """
+ if not (self.is_adult or self.getConfig("is_adult")):
+ raise exceptions.AdultCheckRequired(self.url)
+
+ data1 = self.get_request(self.url)
+ logger.debug(self.url)
+
+ soup1 = self.make_soup(data1)
+ #strip comments from soup
+ baseUrl = "https://" + self.getSiteDomain()
+ if 'Page Not Found.' in data1:
+ raise exceptions.StoryDoesNotExist(self.url)
+
+ # Extract metadata
+ header = soup1.find('header')
+ title = header.find('h1')
+ self.story.setMetadata('title', title.text)
+
+ # Author
+ author = soup1.find('meta', attrs={"name":"author"})
+ authorurl = soup1.find('link', rel="author")
+ self.story.setMetadata('author', author.attrs["content"])
+ self.story.setMetadata('authorUrl', baseUrl + authorurl["href"])
+ self.story.setMetadata('authorId', author.attrs["content"])
+
+ # Description
+ synopsis = soup1.find('meta', attrs={"name":"description"})
+ self.story.setMetadata('description', synopsis.attrs["content"])
+
+ # Tags
+ # As these are the only tags should they go in categories?
+ # Also check for series tags in config
+ # Unfortunately there's no way to get a meaningful volume number
+ series_tags = self.getConfig('series_tags').split(',')
+
+ for a in soup1.find_all('a', class_="tag-link"):
+ strippedTag = a.text.strip('#')
+ if strippedTag in series_tags:
+ self.setSeries(strippedTag.replace('_', ' '), 0)
+ seriesUrl = baseUrl + a.attrs['href']
+ self.story.setMetadata('seriesUrl', seriesUrl);
+ else:
+ self.story.addToList('eroticatags', strippedTag)
+
+
+ # Publish and update dates
+ publishdate = soup1.find('meta', attrs={"name":"created"})
+ pDate = makeDate(publishdate.attrs['content'], self.dateformat)
+ if publishdate is not None: self.story.setMetadata('datePublished', pDate)
+
+ # Get chapter URLs
+ chapterTable = soup1.find('section', id='chapter-list')
+ #
+ if chapterTable is not None:
+ # Multi-chapter story
+ chapterRows = chapterTable.find_all('section', class_='story-card-large')
+ for row in chapterRows:
+ titleDiv = row.find('div', class_='story-card-title')
+ chapterCell = titleDiv.a
+ if chapterCell is not None:
+ chapterTitle = chapterCell.text
+ chapterUrl = baseUrl + chapterCell['href']
+ self.add_chapter(chapterTitle, chapterUrl)
+ dateUpdated = row.find('div', class_='story-card-publication-date')
+ if dateUpdated is not None:
+ self.story.setMetadata('dateUpdated', makeDate(dateUpdated.text, self.dateformat))
+
+ else:
+ # Single chapter
+ chapterTitle = self.story.getMetadata('title')
+ chapterUrl = self.url
+ self.add_chapter(chapterTitle, chapterUrl)
+
+
+ logger.debug("Story: <%s>", self.story)
+
+ return
+
+ def getChapterText(self, url):
+ """
+
+ All content is in section#chapter-content
+ """
+ logger.debug('Getting chapter text from <%s>' % url)
+ data1 = self.get_request(url)
+ soup1 = self.make_soup(data1)
+
+ #strip comments from soup
+ # [comment.extract() for comment in soup1.find_all(text=lambda
text:isinstance(text, Comment))]
+
+ # get story text
+ story1 = soup1.find('section', id='chapter-content')
+
+
+ storytext = self.utf8FromSoup(url, story1)
+
+ return storytext
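The adapter above promotes any tag listed in series_tags to series metadata (underscores become spaces, the tag page becomes seriesUrl) and files the remaining tags under eroticatags. A hedged personal.ini example mirroring the commented [readonlymind.com] defaults added in this same update (the tag name is just the placeholder used in those comments):

    [readonlymind.com]
    ## the site only needs an adult confirmation, login is not used for it
    is_adult:true
    ## tag used for series grouping, written without the leading '#'
    series_tags:Human_Domestication_Guide
    ## optionally turn '_' into spaces in the remaining erotica tags:
    #add_to_replace_metadata:
    # eroticatags=>_=>\s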
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/adapter_royalroadcom.py
new/FanFicFare-4.10.0/fanficfare/adapters/adapter_royalroadcom.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/adapter_royalroadcom.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/adapter_royalroadcom.py
2022-02-14 16:39:42.000000000 +0100
@@ -174,6 +174,10 @@
self.story.setMetadata('status', 'In-Progress')
elif 'HIATUS' == label:
self.story.setMetadata('status', 'Hiatus')
+ elif 'STUB' == label:
+ self.story.setMetadata('status', 'Stub')
+ elif 'DROPPED' == label:
+ self.story.setMetadata('status', 'Dropped')
elif 'Fan Fiction' == label:
self.story.addToList('category', 'FanFiction')
elif 'Original' == label:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/adapters/base_efiction_adapter.py
new/FanFicFare-4.10.0/fanficfare/adapters/base_efiction_adapter.py
--- old/FanFicFare-4.9.0/fanficfare/adapters/base_efiction_adapter.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/adapters/base_efiction_adapter.py
2022-02-14 16:39:42.000000000 +0100
@@ -320,7 +320,7 @@
elif key == 'Word count':
self.story.setMetadata('numWords', value)
elif key == 'Completed':
- if 'Yes' in value or 'Completed' in value or 'Ja' in value:
+ if 'Yes' in value or 'Completed' in value or 'Ja' in value or 'Igen' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/browsercache/firefoxcache2.py
new/FanFicFare-4.10.0/fanficfare/browsercache/firefoxcache2.py
--- old/FanFicFare-4.9.0/fanficfare/browsercache/firefoxcache2.py
2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/browsercache/firefoxcache2.py
2022-02-14 16:39:42.000000000 +0100
@@ -97,7 +97,7 @@
self.add_key_mapping(cache_url,path,created)
self.count+=1
except Exception as e:
- logger.warn("Cache file %s failed to load, skipping."%path)
+ logger.warning("Cache file %s failed to load, skipping."%path)
logger.debug(traceback.format_exc())
# logger.debug(" file time:
%s"%datetime.datetime.fromtimestamp(stats.st_mtime))
# logger.debug("created time:
%s"%datetime.datetime.fromtimestamp(created))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/FanFicFare-4.9.0/fanficfare/browsercache/simplecache.py
new/FanFicFare-4.10.0/fanficfare/browsercache/simplecache.py
--- old/FanFicFare-4.9.0/fanficfare/browsercache/simplecache.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/browsercache/simplecache.py
2022-02-14 16:39:42.000000000 +0100
@@ -90,7 +90,7 @@
self.add_key_mapping(cache_url,path,created)
self.count+=1
except Exception as e:
- logger.warn("Cache file %s failed to load, skipping."%path)
+ logger.warning("Cache file %s failed to load, skipping."%path)
logger.debug(traceback.format_exc())
# key == filename for simple cache
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/cli.py
new/FanFicFare-4.10.0/fanficfare/cli.py
--- old/FanFicFare-4.9.0/fanficfare/cli.py 2022-01-11 22:58:22.000000000
+0100
+++ new/FanFicFare-4.10.0/fanficfare/cli.py 2022-02-14 16:39:42.000000000
+0100
@@ -28,7 +28,7 @@
import os, sys, platform
-version="4.9.0"
+version="4.10.0"
os.environ['CURRENT_VERSION_ID']=version
global_cache = 'global_cache'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/configurable.py
new/FanFicFare-4.10.0/fanficfare/configurable.py
--- old/FanFicFare-4.9.0/fanficfare/configurable.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/configurable.py 2022-02-14
16:39:42.000000000 +0100
@@ -198,7 +198,7 @@
'use_cloudscraper':(None,None,boollist),
'use_basic_cache':(None,None,boollist),
'use_nsapa_proxy':(None,None,boollist),
- 'use_flaresolverr_proxy':(None,None,boollist),
+ 'use_flaresolverr_proxy':(None,None,boollist+['withimages']),
## currently, browser_cache_path is assumed to be
## shared and only ffnet uses it so far
@@ -1004,6 +1004,10 @@
if self.getConfig('use_flaresolverr_proxy',False):
logger.debug("use_flaresolverr_proxy:%s"%self.getConfig('use_flaresolverr_proxy'))
fetchcls = flaresolverr_proxy.FlareSolverr_ProxyFetcher
+ if self.getConfig('use_flaresolverr_proxy') != 'withimages':
+ logger.warning("FlareSolverr v2+ doesn't work with images:
include_images automatically set false")
+ logger.warning("Set use_flaresolverr_proxy:withimages if
your are using FlareSolver v1 and want images")
+ self.set('overrides', 'include_images', 'false')
elif self.getConfig('use_nsapa_proxy',False):
logger.debug("use_nsapa_proxy:%s"%self.getConfig('use_nsapa_proxy'))
fetchcls = nsapa_proxy.NSAPA_ProxyFetcher
@@ -1037,7 +1041,7 @@
age_limit=self.getConfig("browser_cache_age_limit"))
fetcher.BrowserCacheDecorator(self.browser_cache).decorate_fetcher(self.fetcher)
except Exception as e:
- logger.warn("Failed to setup BrowserCache(%s)"%e)
+ logger.warning("Failed to setup BrowserCache(%s)"%e)
raise
## cache decorator terminates the chain when found.
logger.debug("use_basic_cache:%s"%self.getConfig('use_basic_cache'))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/defaults.ini
new/FanFicFare-4.10.0/fanficfare/defaults.ini
--- old/FanFicFare-4.9.0/fanficfare/defaults.ini 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/defaults.ini 2022-02-14
16:39:42.000000000 +0100
@@ -563,6 +563,30 @@
## used. If set to -1, all cached files will be used.
browser_cache_age_limit:4.0
+## As a (second) work around for certain sites blocking automated
+## downloads, FFF offers the ability to request pages through nsapa's
+## fanfictionnet_ff_proxy and FlareSolverr proxy servers. See
+## https://github.com/JimmXinu/FanFicFare/wiki/ProxyFeatures for more
+## details.
+
+## FlareSolverr (https://github.com/FlareSolverr/FlareSolverr) is a
+## generic proxy that works with several otherwise blocked sites.
+## It's recommended to only set use_flaresolverr_proxy:true for
+## specific sites.
+## FlareSolverr v1 doesn't work with some sites anymore (including
+## ffnet), but FlareSolverr v2+ cannot download images.
+## use_flaresolverr_proxy:true assumes FSv2 and automatically sets
+## include_images:false
+## If you want to use FSv1 with images, you can set
+## use_flaresolverr_proxy:withimages
+
+#[www.fanfiction.net]
+#use_flaresolverr_proxy:true
+## option settings, these are the defaults:
+#flaresolverr_proxy_address:localhost
+#flaresolverr_proxy_port:8191
+#flaresolverr_proxy_protocol:http
+
## Because some adapters can pull chapter URLs from human posts, the
## odds of errors in the chapter URLs can be higher for some
## sites/stories. You can set continue_on_chapter_error:true to
@@ -713,10 +737,14 @@
# previous version's include_metadata_pre Can't do on tagsfromtitle
# because that's applied to each part after split.
tagsfromtitledetect=>^[^\]\)]+$=>
+# change ][ and )( to , for [AU][Othertag] etc
+ tagsfromtitle=>\] *\[=>,
+ tagsfromtitle=>\) *\(=>,
# for QuestionableQuesting NSFW subforum.
- tagsfromtitle=>^\[NSFW\].*?(\[([^\]]+)\]|\(([^\)]+)\)).*?$=>NSFW\,\2\,\3
-# remove anything outside () or []
- tagsfromtitle=>^.*?(\[([^\]]+)\]|\(([^\)]+)\)).*?$=>\2\,\3
+
tagsfromtitle=>^\[NSFW\].*?((?P<br>\[)|(?P<pr>\())(?P<tag>(?(br)[^\]]|(?(pr)[^\)]))+)(?(br)\]|(?(pr)\))).*?$=>NSFW\,\g<tag>
+# remove anything outside () or []. Note the \, at the end is used to
+# prevent looping back, so '[Worm(AU)]' becomes 'Worm(AU)', not just 'AU'
+
tagsfromtitle=>^.*?((?P<br>\[)|(?P<pr>\())(?P<tag>(?(br)[^\]]|(?(pr)[^\)]))+)(?(br)\]|(?(pr)\))).*?$=>\g<tag>\,
# remove () []
# tagsfromtitle=>[\(\)\[\]]=>
# shield these html entities from the ';' pattern below
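The reworked tagsfromtitle patterns above are easier to follow outside the
ini syntax. A rough Python equivalent, where re.sub stands in for FFF's
replace-on-title machinery and the trailing comma in the replacement is what
keeps the pattern from matching the same title again on a later pass:

    import re

    title = "[AU][Time Travel] My Story (Worm)"

    # New lines: turn '][' and ')(' separators into commas, so
    # '[AU][Time Travel]' becomes '[AU,Time Travel]'
    t = re.sub(r'\] *\[', ',', title)
    t = re.sub(r'\) *\(', ',', t)

    # Reworked pattern: grab the contents of the first [...] or (...)
    # group.  The (?(br)...) conditionals keep a '[' paired with ']'
    # and a '(' paired with ')', so '[Worm(AU)]' yields 'Worm(AU)'.
    pat = (r'^.*?((?P<br>\[)|(?P<pr>\())'
           r'(?P<tag>(?(br)[^\]]|(?(pr)[^\)]))+)'
           r'(?(br)\]|(?(pr)\))).*?$')
    tags = re.sub(pat, r'\g<tag>,', t)
    print(tags)   # -> 'AU,Time Travel,'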
@@ -1724,19 +1752,6 @@
website_encodings:Windows-1252,utf8
-[fanfic.hu]
-## website encoding(s) In theory, each website reports the character
-## encoding they use for each page. In practice, some sites report it
-## incorrectly. Each adapter has a default list, usually "utf8,
-## Windows-1252" or "Windows-1252, utf8", but this will let you
-## explicitly set the encoding and order if you need to. The special
-## value 'auto' will call chardet and use the encoding it reports if
-## it has +90% confidence. 'auto' is not reliable.
-website_encodings:ISO-8859-1,auto
-
-## Site dedicated to these categories/characters/ships
-extracategories:Harry Potter
-
[fanfic.potterheadsanonymous.com]
## Some sites do not require a login, but do require the user to
## confirm they are adult for adult content. In commandline version,
@@ -2315,6 +2330,30 @@
website_encodings: utf8:ignore, Windows-1252, iso-8859-1
+[readonlymind.com]
+## Some sites do not require a login, but do require the user to
+## confirm they are adult for adult content. In commandline version,
+## this should go in your personal.ini, not defaults.ini.
+## Login on readonlymind.com is optional and not used for adultcheck
+#is_adult:true
+
+## Clear FanFiction from defaults, site is original fiction.
+extratags:Erotica
+
+extra_valid_entries:eroticatags
+eroticatags_label:Erotica Tags
+extra_titlepage_entries: eroticatags
+
+## Some tags are used for series identification. There is no way to determine
+## a sequence, but at least the stories will be grouped. Use the tag as
+## written, without the hash mark. Keep underscores '_' in; they will be
+## replaced by spaces in the metadata.
+#series_tags:Human_Domestication_Guide
+
+## If you want underscores replaced in the tags:
+#add_to_replace_metadata:
+# eroticatags=>_=>\s
+
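A hypothetical sketch of the series_tags behaviour described in the comments
above (series_from_tags() and the comma-separated value format are
illustrative assumptions, not ReadOnlyMindAdapter's actual code):

    def series_from_tags(story_tags, series_tags):
        # story_tags: tags scraped from the story page (no '#' prefix)
        # series_tags: the ini setting, assumed comma separated here
        for tag in series_tags.split(','):
            tag = tag.strip()
            if tag in story_tags:
                # underscores become spaces in the metadata
                return tag.replace('_', ' ')
        return None

    print(series_from_tags(['Erotica', 'Human_Domestication_Guide'],
                           'Human_Domestication_Guide'))
    # -> 'Human Domestication Guide'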
[samandjack.net]
## Some sites require login (or login for some rated stories) The
## program can prompt you, or you can save it in config. In
@@ -3261,12 +3300,6 @@
# numWords=~.*
# size=~.*
-[www.hpfanficarchive.com]
-## Site dedicated to these categories/characters/ships
-extracategories:Harry Potter
-
-website_encodings:Windows-1252,utf8
-
[www.ik-eternal.net]
## Some sites require login (or login for some rated stories) The
## program can prompt you, or you can save it in config. In
@@ -3815,4 +3848,3 @@
## Clear FanFiction from defaults, site is original fiction.
extratags:
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/flaresolverr_proxy.py
new/FanFicFare-4.10.0/fanficfare/flaresolverr_proxy.py
--- old/FanFicFare-4.9.0/fanficfare/flaresolverr_proxy.py 2022-01-11
22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/fanficfare/flaresolverr_proxy.py 2022-02-14
16:39:42.000000000 +0100
@@ -69,12 +69,15 @@
'url':url,
#'userAgent': 'Mozilla/5.0',
'maxTimeout': 30000,
- 'download': True,
# download:True causes response to be base64 encoded
# which makes images work.
'cookies':cookiejar_to_jsonable(self.get_cookiejar()),
'postData':encode_params(parameters),
}
+ if self.getConfig('use_flaresolverr_proxy') == 'withimages':
+ # download param removed in FlareSolverr v2+, but optional
+ # for FFF users still on FlareSolverr v1.
+ fs_data['download'] = True
if self.fs_session:
fs_data['session']=self.fs_session
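For readers unfamiliar with the proxy, fs_data above is the JSON payload
POSTed to the FlareSolverr service configured by the flaresolverr_proxy_*
settings. A standalone sketch using requests; the '/v1' endpoint and the
'cmd' field are assumptions based on FlareSolverr's documented API and do not
appear in this diff:

    import requests

    FS_URL = "http://localhost:8191/v1"   # flaresolverr_proxy_* defaults

    payload = {
        'cmd': 'request.get',
        'url': 'https://www.example.com/story/1234',
        'maxTimeout': 30000,
    }
    withimages = True   # i.e. use_flaresolverr_proxy:withimages
    if withimages:
        # FlareSolverr v1 only: ask for a base64-encoded body so
        # binary (image) responses survive the round trip.
        payload['download'] = True

    solution = requests.post(FS_URL, json=payload).json()['solution']
    print(solution['url'], solution['headers']['content-type'])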
@@ -111,22 +114,33 @@
url = resp.json['solution']['url']
for c in cookiejson_to_jarable(resp.json['solution']['cookies']):
self.get_cookiejar().set_cookie(c)
- if resp.json.get('version','').startswith('v2.'):
- # FlareSolverr v2 detected, don't need base64 decode,
- # and image downloads won't work.
+ data = None
+ ## FSv2 check removed in favor of
+ ## use_flaresolverr_proxy:withimages, in the hope that one day
+ ## FS will have the download option again.
+ if self.getConfig('use_flaresolverr_proxy') == 'withimages':
+ try:
+ # v1 flaresolverr has 'download' option.
+ data = base64.b64decode(resp.json['solution']['response'])
+ except Exception as e:
+ logger.warning("Base64 decode of FlareSolverr response
failed. FSv2 doesn't work with use_flaresolverr_proxy:withimages.")
+ ## Allows for user misconfiguration, i.e.,
+ ## use_flaresolverr_proxy:withimages with FSv2. Warn instead
+ ## of erroring out--until they hit an image and crash FSv2.2
+ ## at least. But hopefully that will be fixed.
+ if data is None:
+ # Without download (or with FlareSolverr v2), don't
+ # need base64 decode, and image downloads won't work.
if 'image' in resp.json['solution']['headers']['content-type']:
raise exceptions.HTTPErrorFFF(
url,
428, # 404 & 410 trip StoryDoesNotExist
# 428 ('Precondition Required') gets the
# error_msg through to the user.
- "FlareSolverr v2 doesn't support image download.",#
error_msg
+ "FlareSolverr v2 doesn't support image download (or
use_flaresolverr_proxy!=withimages)",# error_msg
None # data
)
data = resp.json['solution']['response']
- else:
- # v1 flaresolverr has 'download' option.
- data = base64.b64decode(resp.json['solution']['response'])
else:
logger.debug("flaresolverr error resp:")
logger.debug(json.dumps(resp.json, sort_keys=True,
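The response handling above boils down to: attempt the base64 path only when
the user asked for withimages, and otherwise (or if the decode fails) fall
back to the plain response while refusing image content. A condensed,
self-contained restatement; RuntimeError stands in for exceptions.HTTPErrorFFF:

    import base64
    import logging

    logger = logging.getLogger(__name__)

    def extract_body(resp_json, withimages):
        # resp_json: parsed FlareSolverr reply; withimages mirrors the
        # use_flaresolverr_proxy:withimages setting.
        solution = resp_json['solution']
        data = None
        if withimages:
            try:
                # FlareSolverr v1 with download:True -> base64 body
                data = base64.b64decode(solution['response'])
            except Exception:
                logger.warning("Base64 decode failed; probably FSv2")
        if data is None:
            # Plain (v2-style) response: text only, images unsupported
            if 'image' in solution['headers']['content-type']:
                raise RuntimeError("FlareSolverr v2 can't download images")
            data = solution['response']
        return data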
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/fanficfare/geturls.py
new/FanFicFare-4.10.0/fanficfare/geturls.py
--- old/FanFicFare-4.9.0/fanficfare/geturls.py 2022-01-11 22:58:22.000000000
+0100
+++ new/FanFicFare-4.10.0/fanficfare/geturls.py 2022-02-14 16:39:42.000000000
+0100
@@ -179,7 +179,7 @@
href = adapter.get_request_redirected(href)[1]
href = href.replace('&index=1','')
except Exception as e:
- logger.warn("Skipping royalroad email URL %s, got HTTP error
%s"%(href,e))
+ logger.warning("Skipping royalroad email URL %s, got HTTP error
%s"%(href,e))
return href
def get_urls_from_imap(srv,user,passwd,folder,markread=True):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/FanFicFare-4.9.0/setup.py
new/FanFicFare-4.10.0/setup.py
--- old/FanFicFare-4.9.0/setup.py 2022-01-11 22:58:22.000000000 +0100
+++ new/FanFicFare-4.10.0/setup.py 2022-02-14 16:39:42.000000000 +0100
@@ -26,7 +26,7 @@
name=package_name,
# Versions should comply with PEP440.
- version="4.9.0",
+ version="4.10.0",
description='A tool for downloading fanfiction to eBook formats',
long_description=long_description,