logger.warning("entry()=%d does not contain 'domain' - SKIPPED!", len(entry))
continue
elif not utils.is_domain_wanted(entry["domain"]):
- logger.debug("entry[domain]='%s' is not wanted - SKIPPED!")
+ logger.debug("entry[domain]='%s' is not wanted - SKIPPED!", entry["domain"])
continue
elif instances.is_registered(entry["domain"]):
logger.debug("entry[domain]='%s' is already registered - SKIPPED!", entry["domain"])
for software in types:
logger.debug("software='%s' - BEFORE!", software)
if args.software is not None and args.software != software:
- logger.debug("args.software='%s' does not match software='%s' - SKIPPED!")
+ logger.debug("args.software='%s' does not match software='%s' - SKIPPED!", args.software, software)
continue
doc = None
logger.debug("Querying API path='%s' was successful: domain='%s',data[json][%s]()=%d", path, domain, type(data['json']), len(data['json']))
peers = data["json"]
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
break
if "error_message" not in data:
nodeinfo = data["json"]
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
logger.debug("Found entries: nodeinfo()=%d,domain='%s'", len(nodeinfo), domain)
instances.set_detection_mode(domain, "AUTO_DISCOVERY")
instances.set_nodeinfo_url(domain, link["href"])
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
break
else:
+ logger.debug("Setting last error for domain='%s',data[]='%s'", domain, type(data))
instances.set_last_error(domain, data)
else:
logger.warning("Unknown 'rel' value: domain='%s',link[rel]='%s'", domain, link["rel"])
logger.debug("peer is empty - SKIPPED")
continue
elif isinstance(peer, dict) and "domain" in peer:
- logger.debug("peer[domain]='%s'", peer['domain'])
+ logger.debug("peer[domain]='%s'", peer["domain"])
peer = tidyup.domain(peer["domain"])
elif isinstance(peer, str):
logger.debug("peer='%s'", peer)
logger.debug("peer='%s' is not wanted - SKIPPED!", peer)
continue
- logger.debug("Adding peer='%s' ...", peer)
+ logger.debug("Appending peer='%s' ...", peer)
peers.append(peer)
logger.debug("peers()=%d - EXIT!", len(peers))
logger.debug("sql_string='%s',fields()=%d", sql_string, len(fields))
sql_string = "UPDATE instances SET" + sql_string + " last_updated = ? WHERE domain = ? LIMIT 1"
- logger.debug("Executing SQL: '%s'", sql_string)
+ logger.debug("Executing SQL: sql_string='%s',fields()=%d", sql_string, len(fields))
database.cursor.execute(sql_string, fields)
logger.debug("rowcount=%d", database.cursor.rowcount)
domain_helper.raise_on(domain)
if not isinstance(peers, list):
- raise ValueError(f"Parameter peers[]='{type(peers)}' is not 'list': '%s'")
+ raise ValueError(f"Parameter peers[]='{type(peers)}' is not 'list'")
# Set timestamp
_set_data("total_peers", domain, len(peers))
logger.debug("Found federated_instances for domain='%s'", domain)
peers = peers + federation.add_peers(data["json"]["federated_instances"])
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
if len(peers) == 0:
logger.debug("Appending peer='%s' ...", peer)
peers.append(peer)
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
except network.exceptions as exception:
# Getting blocklist
rows = data["json"]
- logger.debug("Marking domain='%s' as successfully handled ...")
+ logger.debug("Marking domain='%s' as successfully handled ...", domain)
instances.set_success(domain)
if len(rows) == 0:
"block_level": block["severity"]
})
else:
- logger.debug("domain='%s' has no block list")
+ logger.debug("domain='%s' has no block list", domain)
except network.exceptions as exception:
logger.warning("domain='%s',exception[%s]='%s'", domain, type(exception), str(exception))
return list()
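
The except blocks in this section share one shape: network.exceptions is used as the exception spec of an except clause (so it behaves as a tuple of exception classes), the failure is logged as a warning together with its type, recorded against the instance, and the caller receives an empty list. A compact sketch of that shape; the function name, exception tuple, and bookkeeping dict are stand-ins, since the real definitions are not part of this diff:

import logging

logger = logging.getLogger(__name__)

# Stand-in for network.exceptions; the project's tuple covers its HTTP client errors.
exceptions = (ConnectionError, TimeoutError)

last_errors = {}   # stand-in for what instances.set_last_error() persists

def fetch_blocks(domain: str) -> list:
    try:
        raise TimeoutError("simulated fetch failure")   # placeholder for the real request
    except exceptions as exception:
        logger.warning("domain='%s',exception[%s]='%s'", domain, type(exception), str(exception))
        last_errors[domain] = str(exception)
        return list()

print(fetch_blocks("example.org"))   # -> []
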
for header in doc.find_all("h2"):
+ logger.debug("header[%s]='%s'", type(header), header)
header_text = tidyup.reason(header.text)
logger.debug("header_text='%s' - BEFORE!", header_text)
elif command == "":
raise ValueError("Parameter 'command' is empty")
- logger.debug("domain='%s' - BEFORE!")
+ logger.debug("domain='%s' - BEFORE!", domain)
domain = deobfuscate_domain(domain, blocker)
logger.debug("domain='%s' - DEOBFUSCATED!", domain)
logger.warning("Exception '%s' during fetching instances (%s) from domain='%s'", type(exception), command, domain)
instances.set_last_error(domain, exception)
- logger.debug("Checking if domain='%s' has pending updates ...")
+ logger.debug("Checking if domain='%s' has pending updates ...", domain)
if instances.has_pending(domain):
- logger.debug("Flushing updates for domain='%s' ...")
+ logger.debug("Flushing updates for domain='%s' ...", domain)
instances.update_data(domain)
logger.debug("processed='%s' - EXIT!", processed)
domain = tidyup.domain(tag.find("em").contents[0])
if not is_domain_wanted(domain):
- logger.debug("domain='%s' is not wanted - SKIPPED!")
+ logger.debug("domain='%s' is not wanted - SKIPPED!", domain)
continue
logger.debug("Appending domain='%s'", domain)