git.mxchange.org Git - fba.git/commitdiff
Continued:
author Roland Häder <roland@mxchange.org>
Wed, 21 Jun 2023 13:02:23 +0000 (15:02 +0200)
committer Roland Häder <roland@mxchange.org>
Wed, 21 Jun 2023 13:14:23 +0000 (15:14 +0200)
- some Lemmy APIs return null (NoneType) in the 'federated_instances' field
- fixed some logging messages
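
For context, a minimal sketch of the failure mode (the response shape is assumed from the commit message, not taken from a live Lemmy instance): the old membership test succeeds even when the field holds null, so the value must be type-checked as well. See the lemmy.py hunk below.

    # Assumed response shape: some Lemmy APIs answer with "federated_instances": null,
    # which json.loads() turns into None.
    data = {"json": {"federated_instances": None}}

    # Old check: passes, then downstream code receives None instead of a dict.
    if "federated_instances" in data["json"]:
        pass

    # New check (as in the lemmy.py hunk below): additionally requires a dict value.
    if "federated_instances" in data["json"] and isinstance(data["json"]["federated_instances"], dict):
        pass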

fba/commands.py
fba/http/federation.py
fba/networks/friendica.py
fba/networks/lemmy.py
fba/networks/mastodon.py
fba/networks/pleroma.py

index 65965780767b8ca20f4b0754915beb96dd743097..a55e8a35a31e9d98290579f3c06dd6a53e99ac8b 100644 (file)
@@ -357,7 +357,7 @@ def fetch_observer(args: argparse.Namespace):
             logger.debug(f"raw[{type(raw)}]()={len(raw)}")
 
             doc = bs4.BeautifulSoup(raw, features='html.parser')
-            logger.debug("doc[]='%'", type(doc))
+            logger.debug("doc[]='%s'", type(doc))
         except network.exceptions as exception:
             logger.warning(f"Cannot fetch software='{software}' from fediverse.observer: '{type(exception)}'")
             continue
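
The '%' → '%s' corrections in this and the following hunks are more than cosmetic: a bare '%' is not a valid printf-style conversion, so logging fails to format the record internally and the intended message is lost. A minimal, self-contained sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("demo")

    # Broken placeholder: logging's error handler prints a
    # "ValueError: unsupported format character" traceback and drops the message.
    log.debug("doc[]='%'", dict)

    # Fixed placeholder: emits  DEBUG:demo:doc[]='<class 'dict'>'
    log.debug("doc[]='%s'", dict)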
@@ -376,7 +376,7 @@ def fetch_observer(args: argparse.Namespace):
                 logger.debug("domain='%s' is blacklisted - SKIPPED!", domain)
                 continue
             elif instances.is_registered(domain):
-                logger.debug(f"domain='{domain}' is already registered - SKIPPED!")
+                logger.debug("domain='%s' is already registered - SKIPPED!", domain)
                 continue
 
             logger.info(f"Fetching instances for domain='{domain}',software='{software}'")
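
Likewise, the f-string → %-placeholder conversions defer interpolation to the logging framework: with lazy %-formatting the message is only built if a handler actually emits the record, avoiding needless work when DEBUG is disabled. Sketch:

    import logging

    logger = logging.getLogger(__name__)
    domain = "example.com"

    logger.debug(f"domain='{domain}' is already registered - SKIPPED!")   # formatted even when DEBUG is off
    logger.debug("domain='%s' is already registered - SKIPPED!", domain)  # formatted only when emitted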
@@ -479,10 +479,10 @@ def fetch_fba_rss(args: argparse.Namespace):
                 logger.debug("domain='%s' is blacklisted - SKIPPED!", domain)
                 continue
             elif domain in domains:
-                logger.debug(f"domain='{domain}' is already added - SKIPPED!")
+                logger.debug("domain='%s' is already added - SKIPPED!", domain)
                 continue
             elif instances.is_registered(domain):
-                logger.debug(f"domain='{domain}' is already registered - SKIPPED!")
+                logger.debug("domain='%s' is already registered - SKIPPED!", domain)
                 continue
 
             logger.debug(f"Adding domain='{domain}'")
@@ -524,10 +524,10 @@ def fetch_fbabot_atom(args: argparse.Namespace):
         for entry in atom.entries:
             logger.debug(f"entry[]='{type(entry)}'")
             doc = bs4.BeautifulSoup(entry.content.value, "html.parser")
-            logger.debug("doc[]='%'", type(doc))
+            logger.debug("doc[]='%s'", type(doc))
             for element in doc.findAll("a"):
                 for href in element["href"].split(","):
-                    logger.debug(f"href[{type(href)}]={href}")
+                    logger.debug("href[%s]='%s'", type(href), href)
                     domain = tidyup.domain(href)
 
                     logger.debug("domain='%s'", domain)
@@ -535,10 +535,10 @@ def fetch_fbabot_atom(args: argparse.Namespace):
                         logger.debug("domain='%s' is blacklisted - SKIPPED!", domain)
                         continue
                     elif domain in domains:
-                        logger.debug(f"domain='{domain}' is already added - SKIPPED!")
+                        logger.debug("domain='%s' is already added - SKIPPED!", domain)
                         continue
                     elif instances.is_registered(domain):
-                        logger.debug(f"domain='{domain}' is already registered - SKIPPED!")
+                        logger.debug("domain='%s' is already registered - SKIPPED!", domain)
                         continue
 
                     logger.debug(f"Adding domain='{domain}',domains()={len(domains)}")
index 65fa1ef0fcca07a23edc4d9e1c88192b78bb54ab..a7b2db4621c87d2738053ed6158c1a06a2da7a15 100644 (file)
@@ -637,7 +637,10 @@ def find_domains(tag: bs4.element.Tag) -> list:
     return domains
 
 def add_peers(rows: dict) -> list:
-    logger.debug(f"rows()={len(rows)} - CALLED!")
+    logger.debug(f"rows[]={type(rows)} - CALLED!")
+    if not isinstance(rows, dict):
+        raise ValueError(f"Parameter rows[]='{type(rows)}' is not 'dict'")
+
     peers = list()
     for key in ["linked", "allowed", "blocked"]:
         logger.debug(f"Checking key='{key}'")
index ddeb6089a00f70e01c982613f86f471bd00856fb..b3f6bc40d342ceee218fa42b6567b4a2026ef940 100644 (file)
@@ -59,7 +59,7 @@ def fetch_blocks(domain: str) -> dict:
             ).text,
             "html.parser",
         )
-        logger.debug("doc[]='%'", type(doc))
+        logger.debug("doc[]='%s'", type(doc))
 
         block_tag = doc.find(id="about_blocklist")
     except network.exceptions as exception:
index 172e1f85e2769740ebc611a8bff9c8a0ba0539cb..408d863c55d6634264aedfdd1ce2a37f734ec39f 100644 (file)
@@ -77,12 +77,12 @@ def fetch_peers(domain: str) -> list:
         if "error_message" in data:
             logger.warning("Could not reach any JSON API:", domain)
             instances.set_last_error(domain, data)
-        elif "federated_instances" in data["json"]:
+        elif "federated_instances" in data["json"] and isinstance(data["json"]["federated_instances"], dict):
             logger.debug(f"Found federated_instances for domain='{domain}'")
             peers = peers + federation.add_peers(data["json"]["federated_instances"])
             logger.debug("Added instance(s) to peers")
         else:
-            logger.warning("JSON response does not contain 'federated_instances':", domain)
+            logger.warning("JSON response does not contain 'federated_instances', domain='%s'", domain)
             instances.set_last_error(domain, data)
 
     except network.exceptions as exception:
index 3f90a9b7926058c2b87938776e3b92873352f4fd..21e8a7352a023ba599b72979c95b3c0bc71c3e6f 100644 (file)
@@ -108,7 +108,7 @@ def fetch_blocks_from_about(domain: str) -> dict:
         "Silenced servers" : [],
     }
 
-    logger.debug("doc[]='%'", type(doc))
+    logger.debug("doc[]='%s'", type(doc))
     if doc is None:
         logger.warning(f"Cannot fetch any /about pages for domain='{domain}' - EXIT!")
         return blocklist
index c7963bb67272c5dbfe19d0011f4387b7a5d91860..bfcf68ac807c03fba162ab90be377caea9be4830 100644 (file)
@@ -563,7 +563,7 @@ def fetch_blocks_from_about(domain: str) -> dict:
                 "html.parser",
             )
 
-            logger.debug("doc[]='%'", type(doc))
+            logger.debug("doc[]='%s'", type(doc))
             if doc.find("h2") is not None:
                 logger.debug(f"Found 'h2' header in path='{path}' - BREAK!")
                 break
@@ -580,7 +580,7 @@ def fetch_blocks_from_about(domain: str) -> dict:
         "Silenced servers" : [],
     }
 
-    logger.debug("doc[]='%'", type(doc))
+    logger.debug("doc[]='%s'", type(doc))
     if doc is None:
         logger.warning(f"Cannot fetch any /about pages for domain='{domain}' - EXIT!")
         return blocklist