X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;ds=inline;f=classes%2FInbox.php;h=f0f626a24e765acb36edc6f40ad047f9b48e27d8;hb=6c236ab0ff4309c883d7856324f409e043f7db56;hp=014ba3d82923efb70d07fe0b70137fba726ee99c;hpb=72460091ddc6763cb8d62f9d865390ea4973dd44;p=quix0rs-gnu-social.git

diff --git a/classes/Inbox.php b/classes/Inbox.php
index 014ba3d829..f0f626a24e 100644
--- a/classes/Inbox.php
+++ b/classes/Inbox.php
@@ -55,7 +55,6 @@ class Inbox extends Memcached_DataObject
     /**
      * Create a new inbox from existing Notice_inbox stuff
      */
-
     static function initialize($user_id)
     {
         $inbox = Inbox::fromNoticeInbox($user_id);
@@ -96,17 +95,31 @@ class Inbox extends Memcached_DataObject
         $inbox = new Inbox();
         $inbox->user_id = $user_id;
-        $inbox->notice_ids = call_user_func_array('pack', array_merge(array('N*'), $ids));
+        $inbox->pack($ids);
         $inbox->fake = true;
 
         return $inbox;
     }
 
+    /**
+     * Append the given notice to the given user's inbox.
+     * Caching updates are managed for the inbox itself.
+     *
+     * If the notice is already in this inbox, the second
+     * add will be silently dropped.
+     *
+     * @param int $user_id
+     * @param int $notice_id
+     * @return boolean success
+     */
     static function insertNotice($user_id, $notice_id)
     {
-        $inbox = DB_DataObject::staticGet('inbox', 'user_id', $user_id);
-
-        if (empty($inbox)) {
+        // Going straight to the DB rather than trusting our caching
+        // during an update. Note: not using DB_DataObject::staticGet,
+        // which is unsafe to use directly (in-process caching causes
+        // memory leaks, which accumulate in queue processes).
+        $inbox = new Inbox();
+        if (!$inbox->get('user_id', $user_id)) {
             $inbox = Inbox::initialize($user_id);
         }
@@ -114,6 +127,13 @@ class Inbox extends Memcached_DataObject
             return false;
         }
 
+        $ids = $inbox->unpack();
+        if (in_array(intval($notice_id), $ids)) {
+            // Already in there, we probably re-ran some inbox adds
+            // due to an error. Skip the dupe silently.
+            return true;
+        }
+
         $result = $inbox->query(sprintf('UPDATE inbox '.
                                         'set notice_ids = concat(cast(0x%08x as binary(4)), '.
                                         'substr(notice_ids, 1, %d)) '.
@@ -150,7 +170,7 @@ class Inbox extends Memcached_DataObject
             }
         }
 
-        $ids = unpack('N*', $inbox->notice_ids);
+        $ids = $inbox->unpack();
 
         if (!empty($since_id)) {
             $newids = array();
@@ -177,6 +197,70 @@ class Inbox extends Memcached_DataObject
         return $ids;
     }
+    /**
+     * Wrapper for Inbox::stream() and Notice::getStreamByIds() returning
+     * additional items up to the limit if we were short due to deleted
+     * notices still being listed in the inbox.
+     *
+     * This is meant to assist threaded views, and optimizes paging for
+     * threadedness. Not ideal for very late pages, as we have to scan
+     * through all previous items.
+     *
+     * Should avoid duplicates in paging, though.
+     *
+     * @param int $user_id
+     * @param int $offset skip past the most recent N notices (after since_id checks)
+     * @param int $limit
+     * @param mixed $since_id return only notices after but not including this id
+     * @param mixed $max_id return only notices up to and including this id
+     * @param mixed $own ignored?
+     * @return array of Notice objects
+     *
+     * @todo consider repacking the inbox when this happens?
+     * @fixme reimplement $own if we need it?
+     */
+    function streamNoticesThreaded($user_id, $offset, $limit, $since_id, $max_id, $own=false)
+    {
+        // So what we want is:
+        // * slurp in the beginning of the notice list
+        // * filter out deleted notices
+        // * replace any reply notices with their conversation roots
+        // * filter out any duplicate conversations
+        // * return $limit notices after skipping $offset from the most recent
+
+        $ids = self::stream($user_id, 0, self::MAX_NOTICES, $since_id, $max_id, $own);
+
+        // Do a bulk lookup for the first $offset + $limit items
+        // Fast path when nothing's deleted.
+        $firstChunk = array_slice($ids, 0, $offset + $limit);
+        $notices = Notice::getStreamByIds($firstChunk);
+
+        assert($notices instanceof ArrayWrapper);
+        $items = $notices->_items;
+
+        // Extract the latest non-deleted item in each convo
+        $noticeByConvo = array();
+        foreach ($items as $notice) {
+            if (empty($noticeByConvo[$notice->conversation])) {
+                $noticeByConvo[$notice->conversation] = $notice;
+            }
+        }
+
+        $wanted = count($firstChunk); // raw entry count in the inbox up to $offset + $limit
+        // There were deleted notices, we'll need to look for more.
+        $remainder = array_slice($ids, $limit);
+
+        for ($i = $offset + $limit; count($noticeByConvo) < $wanted && $i < count($ids); $i++) {
+            $notice = Notice::staticGet($ids[$i]);
+            if ($notice && empty($noticeByConvo[$notice->conversation])) {
+                $noticeByConvo[$notice->conversation] = $notice;
+            }
+        }
+
+        $slice = array_slice($noticeByConvo, $offset, $limit, false);
+        return new ArrayWrapper($slice);
+    }
+
     /**
      * Wrapper for Inbox::stream() and Notice::getStreamByIds() returning
      * additional items up to the limit if we were short due to deleted
      * notices still being listed in the inbox.
@@ -229,4 +313,21 @@ class Inbox extends Memcached_DataObject
         }
         return new ArrayWrapper($items);
     }
+
+    /**
+     * Saves a list of integer notice_ids into a packed blob in this object.
+     * @param array $ids list of integer notice_ids
+     */
+    protected function pack(array $ids)
+    {
+        $this->notice_ids = call_user_func_array('pack', array_merge(array('N*'), $ids));
+    }
+
+    /**
+     * @return array of integer notice_ids
+     */
+    protected function unpack()
+    {
+        return unpack('N*', $this->notice_ids);
+    }
 }
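
Below is a minimal standalone sketch of the packed notice_ids format that the new pack()/unpack() helpers and the insertNotice() UPDATE both manipulate: each notice id is stored as a 4-byte big-endian unsigned integer ('N'), newest first, and prepending an id amounts to gluing 4 new bytes onto the front while truncating the tail. The helper names (packIds, unpackIds, prependId) and the byte cap are illustrative only, not part of the Inbox class; only the pack()/unpack() format strings mirror the code above.

<?php
// Sketch of the 'N*' blob layout used by Inbox::pack() / Inbox::unpack().

/** Pack a list of integer notice ids into a binary blob (newest first). */
function packIds(array $ids)
{
    // 'N' = unsigned 32-bit big-endian; 'N*' repeats for every argument.
    return call_user_func_array('pack', array_merge(array('N*'), $ids));
}

/** Unpack the blob back into a 1-indexed array of integers. */
function unpackIds($blob)
{
    return unpack('N*', $blob);
}

/**
 * Prepend one id in PHP, mirroring what the SQL
 * concat(cast(0x%08x as binary(4)), substr(notice_ids, 1, %d)) does server-side.
 * $maxBytes caps the blob, dropping the oldest id once the cap is reached.
 */
function prependId($blob, $id, $maxBytes)
{
    return pack('N', $id) . substr($blob, 0, $maxBytes - 4);
}

$blob = packIds(array(103, 102, 101));
var_dump(strlen($blob));    // int(12) -- three ids, 4 bytes each
var_dump(unpackIds($blob)); // array(1 => 103, 2 => 102, 3 => 101)

$blob = prependId($blob, 104, 4096);
var_dump(unpackIds($blob)); // array(1 => 104, 2 => 103, 3 => 102, 4 => 101)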
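
And a short sketch of the per-conversation de-duplication that streamNoticesThreaded() performs before applying offset and limit. It is only an approximation of the method above: it assumes plain arrays of id/conversation pairs instead of Notice objects, and the dedupeByConversation name is made up for illustration.

<?php
// Approximation of the threaded-stream filtering: the input list is
// newest-first, and only the newest notice in each conversation survives.

/**
 * $items is newest-first; each entry is array('id' => int, 'conversation' => int).
 * Returns at most $limit entries after skipping $offset, one per conversation.
 */
function dedupeByConversation(array $items, $offset, $limit)
{
    $byConvo = array();
    foreach ($items as $item) {
        // First hit per conversation wins, i.e. the newest notice in it.
        if (!isset($byConvo[$item['conversation']])) {
            $byConvo[$item['conversation']] = $item;
        }
    }
    return array_slice(array_values($byConvo), $offset, $limit);
}

$inbox = array(
    array('id' => 210, 'conversation' => 7),
    array('id' => 209, 'conversation' => 5),
    array('id' => 208, 'conversation' => 7), // older reply in convo 7, dropped
    array('id' => 207, 'conversation' => 3),
);
print_r(dedupeByConversation($inbox, 0, 10)); // ids 210, 209, 207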