X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=classes%2FInbox.php;h=a1ab6215fd3a018cfcd2724522443e1fbb8d888f;hb=3ddfa4de931f4eb3083ac877898b5ee8b03a82f1;hp=086dba1c9dbe266613dd84bfc3cd011673867139;hpb=e3dc45d103666b090777a1304ca093ab8f45c803;p=quix0rs-gnu-social.git

diff --git a/classes/Inbox.php b/classes/Inbox.php
index 086dba1c9d..a1ab6215fd 100644
--- a/classes/Inbox.php
+++ b/classes/Inbox.php
@@ -32,6 +32,7 @@ require_once INSTALLDIR.'/classes/Memcached_DataObject.php';
 class Inbox extends Memcached_DataObject
 {
     const BOXCAR = 128;
+    const MAX_NOTICES = 1024;
 
     ###START_AUTOCODE
     /* the code below is auto generated do not remove the above tag */
@@ -54,7 +55,6 @@ class Inbox extends Memcached_DataObject
     /**
      * Create a new inbox from existing Notice_inbox stuff
      */
-
     static function initialize($user_id)
     {
         $inbox = Inbox::fromNoticeInbox($user_id);
@@ -81,7 +81,7 @@ class Inbox extends Memcached_DataObject
         $ni->selectAdd();
         $ni->selectAdd('notice_id');
         $ni->orderBy('notice_id DESC');
-        $ni->limit(0, 1024);
+        $ni->limit(0, self::MAX_NOTICES);
 
         if ($ni->find()) {
             while($ni->fetch()) {
@@ -95,17 +95,31 @@ class Inbox extends Memcached_DataObject
 
         $inbox = new Inbox();
         $inbox->user_id = $user_id;
-        $inbox->notice_ids = call_user_func_array('pack', array_merge(array('N*'), $ids));
+        $inbox->pack($ids);
         $inbox->fake = true;
 
         return $inbox;
     }
 
+    /**
+     * Append the given notice to the given user's inbox.
+     * Caching updates are managed for the inbox itself.
+     *
+     * If the notice is already in this inbox, the second
+     * add will be silently dropped.
+     *
+     * @param int $user_id
+     * @param int $notice_id
+     * @return boolean success
+     */
     static function insertNotice($user_id, $notice_id)
     {
-        $inbox = DB_DataObject::staticGet('inbox', 'user_id', $user_id);
-
-        if (empty($inbox)) {
+        // Going straight to the DB rather than trusting our caching
+        // during an update. Note: not using DB_DataObject::staticGet,
+        // which is unsafe to use directly (in-process caching causes
+        // memory leaks, which accumulate in queue processes).
+        $inbox = new Inbox();
+        if (!$inbox->get('user_id', $user_id)) {
             $inbox = Inbox::initialize($user_id);
         }
 
@@ -113,18 +127,23 @@ class Inbox extends Memcached_DataObject
             return false;
         }
 
+        $ids = $inbox->unpack();
+        if (in_array(intval($notice_id), $ids)) {
+            // Already in there, we probably re-ran some inbox adds
+            // due to an error. Skip the dupe silently.
+            return true;
+        }
+
         $result = $inbox->query(sprintf('UPDATE inbox '.
                                         'set notice_ids = concat(cast(0x%08x as binary(4)), '.
-                                        'substr(notice_ids, 1, 4092)) '.
+                                        'substr(notice_ids, 1, %d)) '.
                                         'WHERE user_id = %d',
-                                        $notice_id, $user_id));
+                                        $notice_id,
+                                        4 * (self::MAX_NOTICES - 1),
+                                        $user_id));
 
         if ($result) {
-            $c = self::memcache();
-
-            if (!empty($c)) {
-                $c->delete(self::cacheKey('inbox', 'user_id', $user_id));
-            }
+            self::blow('inbox:user_id:%d', $user_id);
         }
 
         return $result;
@@ -138,7 +157,7 @@ class Inbox extends Memcached_DataObject
         }
     }
 
-    function stream($user_id, $offset, $limit, $since_id, $max_id, $since, $own=false)
+    function stream($user_id, $offset, $limit, $since_id, $max_id, $own=false)
     {
         $inbox = Inbox::staticGet('user_id', $user_id);
 
@@ -151,7 +170,7 @@ class Inbox extends Memcached_DataObject
             }
         }
 
-        $ids = unpack('N*', $inbox->notice_ids);
+        $ids = $inbox->unpack();
 
         if (!empty($since_id)) {
             $newids = array();
@@ -177,4 +196,74 @@
 
         return $ids;
     }
+
+    /**
+     * Wrapper for Inbox::stream() and Notice::getStreamByIds() returning
+     * additional items up to the limit if we were short due to deleted
+     * notices still being listed in the inbox.
+     *
+     * The fast path (when no items are deleted) should be just as fast; the
+     * offset parameter is applied *before* lookups for maximum efficiency.
+     *
+     * This means offset-based paging may show duplicates, but similar behavior
+     * already exists when new notices are posted between page views, so we
+     * think people will be ok with this until id-based paging is introduced
+     * to the user interface.
+     *
+     * @param int $user_id
+     * @param int $offset skip past the most recent N notices (after since_id checks)
+     * @param int $limit
+     * @param mixed $since_id return only notices after but not including this id
+     * @param mixed $max_id return only notices up to and including this id
+     * @param mixed $own ignored?
+     * @return array of Notice objects
+     *
+     * @todo consider repacking the inbox when this happens?
+     * @fixme reimplement $own if we need it?
+     */
+    function streamNotices($user_id, $offset, $limit, $since_id, $max_id, $own=false)
+    {
+        $ids = self::stream($user_id, $offset, self::MAX_NOTICES, $since_id, $max_id, $own);
+
+        // Do a bulk lookup for the first $limit items
+        // Fast path when nothing's deleted.
+        $firstChunk = array_slice($ids, 0, $limit);
+        $notices = Notice::getStreamByIds($firstChunk);
+
+        $wanted = count($firstChunk); // raw entry count in the inbox up to our $limit
+        if ($notices->N >= $wanted) {
+            return $notices;
+        }
+
+        // There were deleted notices, we'll need to look for more.
+        assert($notices instanceof ArrayWrapper);
+        $items = $notices->_items;
+        $remainder = array_slice($ids, $limit);
+
+        while (count($items) < $wanted && count($remainder) > 0) {
+            $notice = Notice::staticGet(array_shift($remainder));
+            if ($notice) {
+                $items[] = $notice;
+            } else {
+            }
+        }
+        return new ArrayWrapper($items);
+    }
+
+    /**
+     * Saves a list of integer notice_ids into a packed blob in this object.
+     * @param array $ids list of integer notice_ids
+     */
+    protected function pack(array $ids)
+    {
+        $this->notice_ids = call_user_func_array('pack', array_merge(array('N*'), $ids));
+    }
+
+    /**
+     * @return array of integer notice_ids
+     */
+    protected function unpack()
+    {
+        return unpack('N*', $this->notice_ids);
+    }
 }
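
For reference, a standalone sketch (not part of the patch above) of the packed notice_ids format that Inbox::pack(), Inbox::unpack(), and the raw UPDATE statement all operate on. The constant mirrors the new class constant; the ids and variable names are made up for illustration.

<?php
// Illustrative sketch only: the inbox stores notice ids as a blob of
// unsigned 32-bit big-endian integers, newest first, capped at
// MAX_NOTICES entries.

define('MAX_NOTICES', 1024);       // mirrors Inbox::MAX_NOTICES

$ids  = array(103, 102, 101);      // hypothetical notice ids, newest first
$blob = call_user_func_array('pack', array_merge(array('N*'), $ids));
assert(strlen($blob) === 4 * count($ids));   // 4 bytes per id

// Prepending a new id is what the SQL does with
//   concat(cast(0x%08x as binary(4)), substr(notice_ids, 1, 4 * (MAX_NOTICES - 1)))
// i.e. glue 4 new bytes onto the front, keep at most MAX_NOTICES - 1 old entries.
$newId = 104;
$blob  = pack('N', $newId) . substr($blob, 0, 4 * (MAX_NOTICES - 1));

// unpack('N*') recovers the ids as a 1-based array, newest first.
print_r(unpack('N*', $blob));      // Array ( [1] => 104 [2] => 103 [3] => 102 [4] => 101 )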
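
And a hypothetical caller, to show how the new streamNotices() wrapper might be used for offset-based paging. The static-style call mirrors how the patch itself invokes self::stream(); iterating the result with fetch() assumes the returned stream/ArrayWrapper behaves like a DB_DataObject result set, as it does elsewhere in this codebase. $user and NOTICES_PER_PAGE are placeholders, not names from the patch.

<?php
// Hypothetical usage sketch -- not part of the patch above.

define('NOTICES_PER_PAGE', 20);

$page = 2;   // zero-based page index

// Fetch one page of the user's inbox. Deleted notices are skipped and
// backfilled from deeper in the packed id list, so the page stays full
// (at the cost of possible duplicates when paging by offset).
$notices = Inbox::streamNotices(
    $user->id,                    // whose inbox
    $page * NOTICES_PER_PAGE,     // offset: skip newer entries
    NOTICES_PER_PAGE,             // limit
    0,                            // since_id: no lower bound
    0                             // max_id: no upper bound
);

while ($notices->fetch()) {
    print $notices->id . ' ' . $notices->content . "\n";
}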