namespace Friendica\Core;
use Friendica\BaseObject;
+use Friendica\Core;
use Friendica\Database\DBA;
use Friendica\Model\Process;
use Friendica\Util\DateTimeFormat;
-use Friendica\Util\Logger\WorkerLogger;
use Friendica\Util\Network;
/**
class Worker
{
const STATE_STARTUP = 1; // Worker is in startup. This takes most time.
- const STATE_SHORT_LOOP = 2; // Worker is processing preassigned jobs, thus saving much time.
+ const STATE_LONG_LOOP = 2; // Worker is processing the whole - long - loop.
const STATE_REFETCH = 3; // Worker had refetched jobs in the execution loop.
- const STATE_LONG_LOOP = 4; // Worker is processing the whole - long - loop.
+ const STATE_SHORT_LOOP = 4; // Worker is processing preassigned jobs, thus saving much time.
+
+ const FAST_COMMANDS = ['APDelivery', 'Delivery', 'CreateShadowEntry'];
+
private static $up_start;
private static $db_duration = 0;
// We fetch the next queue entry that is about to be executed
while ($r = self::workerProcess()) {
+ $refetched = false;
foreach ($r as $entry) {
// Assure that the priority is an integer value
$entry['priority'] = (int)$entry['priority'];
return;
}
- // If possible we will fetch new jobs for this worker
- if (!self::getWaitingJobForPID() && Lock::acquire('worker_process', 0)) {
+ // Trying to fetch new processes - but only once when successful
+ if (!$refetched && Lock::acquire('worker_process', 0)) {
self::findWorkerProcesses();
Lock::release('worker_process');
self::$state = self::STATE_REFETCH;
+ $refetched = true;
+ } else {
+ self::$state = self::STATE_SHORT_LOOP;
}
}
- if (self::$state != self::STATE_REFETCH) {
+ // To avoid the quitting of multiple workers only one worker at a time will execute the check
+ if (!self::getWaitingJobForPID()) {
self::$state = self::STATE_LONG_LOOP;
- }
- // To avoid the quitting of multiple workers only one worker at a time will execute the check
- if (Lock::acquire('worker', 0)) {
+ if (Lock::acquire('worker', 0)) {
// Count active workers and compare them with a maximum value that depends on the load
- if (self::tooMuchWorkers()) {
- Logger::log('Active worker limit reached, quitting.', Logger::DEBUG);
- Lock::release('worker');
- return;
- }
+ if (self::tooMuchWorkers()) {
+ Logger::log('Active worker limit reached, quitting.', Logger::DEBUG);
+ Lock::release('worker');
+ return;
+ }
- // Check free memory
- if ($a->isMinMemoryReached()) {
- Logger::log('Memory limit reached, quitting.', Logger::DEBUG);
+ // Check free memory
+ if ($a->isMinMemoryReached()) {
+ Logger::log('Memory limit reached, quitting.', Logger::DEBUG);
+ Lock::release('worker');
+ return;
+ }
Lock::release('worker');
- return;
}
- Lock::release('worker');
}
// Quit the worker once every cron interval
private static function deferredEntries()
{
$stamp = (float)microtime(true);
- $count = DBA::count('workerqueue', ["NOT `done` AND `pid` = 0 AND `next_try` > ?", DateTimeFormat::utcNow()]);
+ $count = DBA::count('workerqueue', ["NOT `done` AND `pid` = 0 AND `retrial` > ?", 0]);
self::$db_duration += (microtime(true) - $stamp);
self::$db_duration_count += (microtime(true) - $stamp);
return $count;
$argc = count($argv);
- $logger = $a->getLogger();
- $workerLogger = new WorkerLogger($logger, $funcname);
+ Logger::enableWorker($funcname);
- $workerLogger ->info("Process start.", ['priority' => $queue["priority"], 'id' => $queue["id"]]);
+ Logger::info("Process start.", ['priority' => $queue["priority"], 'id' => $queue["id"]]);
$stamp = (float)microtime(true);
unset($_SESSION);
// Set the workerLogger as new default logger
- Logger::init($workerLogger);
if ($method_call) {
call_user_func_array(sprintf('Friendica\Worker\%s::execute', $funcname), $argv);
} else {
$funcname($argv, $argc);
}
- Logger::init($logger);
+
+ Logger::disableWorker();
unset($a->queue);
$rest = round(max(0, $up_duration - (self::$db_duration + self::$lock_duration)), 2);
$exec = round($duration, 2);
- $logger->info('Performance:', ['state' => self::$state, 'count' => $dbcount, 'stat' => $dbstat, 'write' => $dbwrite, 'lock' => $dblock, 'total' => $dbtotal, 'rest' => $rest, 'exec' => $exec]);
+ Logger::info('Performance:', ['state' => self::$state, 'count' => $dbcount, 'stat' => $dbstat, 'write' => $dbwrite, 'lock' => $dblock, 'total' => $dbtotal, 'rest' => $rest, 'exec' => $exec]);
self::$up_start = microtime(true);
self::$db_duration = 0;
self::$db_duration_stat = 0;
self::$db_duration_write = 0;
self::$lock_duration = 0;
- self::$state = self::STATE_SHORT_LOOP;
if ($duration > 3600) {
- $logger->info('Longer than 1 hour.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
+ Logger::info('Longer than 1 hour.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
} elseif ($duration > 600) {
- $logger->info('Longer than 10 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
+ Logger::info('Longer than 10 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
} elseif ($duration > 300) {
- $logger->info('Longer than 5 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
+ Logger::info('Longer than 5 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
} elseif ($duration > 120) {
- $logger->info('Longer than 2 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
+ Logger::info('Longer than 2 minutes.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration/60, 3)]);
}
- $workerLogger->info('Process done.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration, 3)]);
+ Logger::info('Process done.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'duration' => round($duration, 3)]);
$a->getProfiler()->saveLog($a->getLogger(), "ID " . $queue["id"] . ": " . $funcname);
$cooldown = Config::get("system", "worker_cooldown", 0);
if ($cooldown > 0) {
- $logger->info('Cooldown.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'cooldown' => $cooldown]);
+ Logger::info('Cooldown.', ['priority' => $queue["priority"], 'id' => $queue["id"], 'cooldown' => $cooldown]);
sleep($cooldown);
}
}
*/
private static function tooMuchWorkers()
{
- $queues = Config::get("system", "worker_queues", 4);
+ $queues = Config::get("system", "worker_queues", 10);
$maxqueues = $queues;
// Decrease the number of workers at higher load
$load = System::currentLoad();
if ($load) {
- $maxsysload = intval(Config::get("system", "maxloadavg", 50));
+ $maxsysload = intval(Config::get("system", "maxloadavg", 20));
/* Default exponent 3 causes queues to rapidly decrease as load increases.
* If you have 20 max queues at idle, then you get only 5 queues at 37.1% of $maxsysload.
DBA::close($processes);
}
DBA::close($jobs);
- $entries = $deferred + $waiting_processes;
} else {
- $entries = self::totalEntries();
- $waiting_processes = max(0, $entries - $deferred);
+ $waiting_processes = self::totalEntries();
$stamp = (float)microtime(true);
$jobs = DBA::p("SELECT COUNT(*) AS `running`, `priority` FROM `process` INNER JOIN `workerqueue` ON `workerqueue`.`pid` = `process`.`pid` AND NOT `done` GROUP BY `priority` ORDER BY `priority`");
self::$db_duration += (microtime(true) - $stamp);
$processlist .= ' ('.implode(', ', $listitem).')';
- if (Config::get("system", "worker_fastlane", false) && ($queues > 0) && self::entriesExists() && ($active >= $queues)) {
+ if (Config::get("system", "worker_fastlane", false) && ($queues > 0) && ($active >= $queues) && self::entriesExists()) {
$top_priority = self::highestPriority();
$high_running = self::processWithPriorityActive($top_priority);
Logger::log("Load: " . $load ."/" . $maxsysload . " - processes: " . $deferred . "/" . $active . "/" . $waiting_processes . $processlist . " - maximum: " . $queues . "/" . $maxqueues, Logger::DEBUG);
// Are there fewer workers running as possible? Then fork a new one.
- if (!Config::get("system", "worker_dont_fork", false) && ($queues > ($active + 1)) && ($entries > 1)) {
+ if (!Config::get("system", "worker_dont_fork", false) && ($queues > ($active + 1)) && self::entriesExists()) {
Logger::log("Active workers: ".$active."/".$queues." Fork a new worker.", Logger::DEBUG);
if (Config::get('system', 'worker_daemon_mode', false)) {
self::IPCSetJobState(true);
return [];
}
- if ($priority <= PRIORITY_MEDIUM) {
- $limit = Config::get('system', 'worker_fetch_limit', 1);
- } else {
- $limit = 1;
- }
+ $limit = Config::get('system', 'worker_fetch_limit', 1);
$ids = [];
$stamp = (float)microtime(true);
$condition = ["`priority` = ? AND `pid` = 0 AND NOT `done` AND `next_try` < ?", $priority, DateTimeFormat::utcNow()];
- $tasks = DBA::select('workerqueue', ['id'], $condition, ['limit' => $limit, 'order' => ['created']]);
+ $tasks = DBA::select('workerqueue', ['id', 'parameter'], $condition, ['limit' => $limit, 'order' => ['created']]);
self::$db_duration += (microtime(true) - $stamp);
while ($task = DBA::fetch($tasks)) {
$ids[] = $task['id'];
+ // Only continue that loop while we are storing commands that can be processed quickly
+ $command = json_decode($task['parameter'])[0];
+ if (!in_array($command, self::FAST_COMMANDS)) {
+ break;
+ }
}
DBA::close($tasks);
- Logger::info('Found:', ['id' => $ids, 'priority' => $priority]);
+ Logger::info('Found:', ['priority' => $priority, 'id' => $ids]);
return $ids;
}
// If there is no result we check without priority limit
if (empty($ids)) {
+ $limit = Config::get('system', 'worker_fetch_limit', 1);
+
$stamp = (float)microtime(true);
$condition = ["`pid` = 0 AND NOT `done` AND `next_try` < ?", DateTimeFormat::utcNow()];
- $result = DBA::select('workerqueue', ['id'], $condition, ['limit' => 1, 'order' => ['priority', 'created']]);
+ $tasks = DBA::select('workerqueue', ['id', 'parameter'], $condition, ['limit' => $limit, 'order' => ['priority', 'created']]);
self::$db_duration += (microtime(true) - $stamp);
- while ($id = DBA::fetch($result)) {
- $ids[] = $id["id"];
+ while ($task = DBA::fetch($tasks)) {
+ $ids[] = $task['id'];
+ // Only continue that loop while we are storing commands that can be processed quickly
+ $command = json_decode($task['parameter'])[0];
+ if (!in_array($command, self::FAST_COMMANDS)) {
+ break;
+ }
}
- DBA::close($result);
+ DBA::close($tasks);
}
if (!empty($ids)) {
}
$url = System::baseUrl()."/worker";
- Network::fetchUrl($url, false, $redirects, 1);
+ Network::fetchUrl($url, false, 1);
}
/**
$args = ['no_cron' => !$do_cron];
- get_app()->proc_run($command, $args);
+ $a = get_app();
+ $process = new Core\Process($a->getLogger(), $a->getMode(), $a->getConfig(), $a->getBasePath());
+ $process->run($command, $args);
// after spawning we have to remove the flag.
if (Config::get('system', 'worker_daemon_mode', false)) {
* @param (integer|array) priority or parameter array, strings are deprecated and are ignored
*
* next args are passed as $cmd command line
- * or: Worker::add(PRIORITY_HIGH, "Notifier", "drop", $drop_id);
+ * or: Worker::add(PRIORITY_HIGH, "Notifier", Delivery::DELETION, $drop_id);
* or: Worker::add(array('priority' => PRIORITY_HIGH, 'dont_fork' => true), "CreateShadowEntry", $post_id);
*
* @return boolean "false" if proc_run couldn't be executed
}
$priority = PRIORITY_MEDIUM;
- $dont_fork = Config::get("system", "worker_dont_fork", false);
+ // Don't fork from frontend tasks by default
+ $dont_fork = Config::get("system", "worker_dont_fork", false) || !\get_app()->getMode()->isBackend();
$created = DateTimeFormat::utcNow();
$force_priority = false;
return true;
}
+ /**
+ * Returns the next retrial level for worker jobs.
+ * This function will skip levels when jobs are older.
+ *
+ * @param array $queue Worker queue entry (reads the 'id', 'created' and 'retrial' fields)
+ * @param integer $max_level maximum retrial level
+ * @return integer the next retrial level value
+ */
+ private static function getNextRetrial($queue, $max_level)
+ {
+ // Age of the job in seconds, derived from its creation timestamp
+ $created = strtotime($queue['created']);
+ $retrial_time = time() - $created;
+
+ // Default behaviour: simply advance one level past the current one
+ $new_retrial = $queue['retrial'] + 1;
+ $total = 0;
+ // Accumulate the per-level backoff delays; whenever the job's age already
+ // exceeds the cumulative delay up to some later level, jump straight to
+ // that level — this is how older jobs "skip" retrial levels.
+ // NOTE(review): the delay term includes a rand(1, 30) component, so the
+ // skip decision is slightly randomized on each evaluation.
+ for ($retrial = 0; $retrial <= $max_level + 1; ++$retrial) {
+ $delay = (($retrial + 3) ** 4) + (rand(1, 30) * ($retrial + 1));
+ $total += $delay;
+ if (($total < $retrial_time) && ($retrial > $queue['retrial'])) {
+ $new_retrial = $retrial;
+ }
+ }
+ Logger::info('New retrial for task', ['id' => $queue['id'], 'created' => $queue['created'], 'old' => $queue['retrial'], 'new' => $new_retrial]);
+ return $new_retrial;
+ }
+
/**
* Defers the current worker entry
*/
$id = $queue['id'];
$priority = $queue['priority'];
- if ($retrial > 14) {
- Logger::log('Id ' . $id . ' had been tried 14 times. We stop now.', Logger::DEBUG);
+ $max_level = Config::get('system', 'worker_defer_limit');
+
+ $new_retrial = self::getNextRetrial($queue, $max_level);
+
+ if ($new_retrial > $max_level) {
+ Logger::info('The task exceeded the maximum retry count', ['id' => $id, 'max_level' => $max_level, 'retrial' => $new_retrial]);
return;
}
// Calculate the delay until the next trial
- $delay = (($retrial + 3) ** 4) + (rand(1, 30) * ($retrial + 1));
+ $delay = (($new_retrial + 2) ** 4) + (rand(1, 30) * ($new_retrial));
$next = DateTimeFormat::utc('now + ' . $delay . ' seconds');
- if (($priority < PRIORITY_MEDIUM) && ($retrial > 2)) {
+ if (($priority < PRIORITY_MEDIUM) && ($new_retrial > 3)) {
$priority = PRIORITY_MEDIUM;
- } elseif (($priority < PRIORITY_LOW) && ($retrial > 5)) {
+ } elseif (($priority < PRIORITY_LOW) && ($new_retrial > 6)) {
$priority = PRIORITY_LOW;
- } elseif (($priority < PRIORITY_NEGLIGIBLE) && ($retrial > 7)) {
+ } elseif (($priority < PRIORITY_NEGLIGIBLE) && ($new_retrial > 8)) {
$priority = PRIORITY_NEGLIGIBLE;
}
- Logger::log('Defer execution ' . $retrial . ' of id ' . $id . ' to ' . $next . ' - priority old/new: ' . $queue['priority'] . '/' . $priority, Logger::DEBUG);
+ Logger::info('Deferred task', ['id' => $id, 'retrial' => $new_retrial, 'next_execution' => $next, 'old_prio' => $queue['priority'], 'new_prio' => $priority]);
$stamp = (float)microtime(true);
- $fields = ['retrial' => $retrial + 1, 'next_try' => $next, 'executed' => DBA::NULL_DATETIME, 'pid' => 0, 'priority' => $priority];
+ $fields = ['retrial' => $new_retrial, 'next_try' => $next, 'executed' => DBA::NULL_DATETIME, 'pid' => 0, 'priority' => $priority];
DBA::update('workerqueue', $fields, ['id' => $id]);
self::$db_duration += (microtime(true) - $stamp);
self::$db_duration_write += (microtime(true) - $stamp);