$data = ['url' => $url, 'type' => 'photo'];
} else {
// Checking, if the link goes to a picture
- $data = ParseUrl::getSiteinfoCached($pictures[0][1], true);
+ $data = ParseUrl::getSiteinfoCached($pictures[0][1]);
}
// Workaround:
// Sometimes photo posts to the user's own album are not detected at the start.
// So it seems we cannot use the cache for these cases. That's strange.
if (($data['type'] != 'photo') && strstr($pictures[0][1], "/photos/")) {
- $data = ParseUrl::getSiteinfo($pictures[0][1], true);
+ $data = ParseUrl::getSiteinfo($pictures[0][1]);
}
if ($data['type'] == 'photo') {
$post['text'] = trim($body);
}
} elseif (isset($post['url']) && ($post['type'] == 'video')) {
- $data = ParseUrl::getSiteinfoCached($post['url'], true);
+ $data = ParseUrl::getSiteinfoCached($post['url']);
if (isset($data['images'][0])) {
$post['image'] = $data['images'][0]['src'];
 * Search for cached embeddable data of a URL, otherwise fetch it
*
 * @param string $url The URL of the page that should be scraped
- * @param bool $no_guessing If true the parse doens't search for
- * preview pictures
* @param bool $do_oembed The false option is used by the function fetch_oembed()
* to avoid endless loops
*
* string 'type' => Content type
* string 'title' => (optional) The title of the content
* string 'text' => (optional) The description for the content
- * string 'image' => (optional) A preview image of the content (only available if $no_geuessing = false)
+ * string 'image' => (optional) A preview image of the content
* array 'images' => (optional) Array of preview pictures
* string 'keywords' => (optional) The tags which belong to the content
*
* @see ParseUrl::getSiteinfo() for more information about scraping
* embeddable content
*/
- public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true): array
+ public static function getSiteinfoCached($url, $do_oembed = true): array
{
if (empty($url)) {
return [
$urlHash = hash('sha256', $url);
$parsed_url = DBA::selectFirst('parsed_url', ['content'],
- ['url_hash' => $urlHash, 'guessing' => !$no_guessing, 'oembed' => $do_oembed]
+ ['url_hash' => $urlHash, 'oembed' => $do_oembed]
);
if (!empty($parsed_url['content'])) {
$data = unserialize($parsed_url['content']);
return $data;
}
- $data = self::getSiteinfo($url, $no_guessing, $do_oembed);
+ $data = self::getSiteinfo($url, $do_oembed);
$expires = $data['expires'];
'parsed_url',
[
'url_hash' => $urlHash,
- 'guessing' => !$no_guessing,
'oembed' => $do_oembed,
'url' => $url,
'content' => serialize($data),
* \<meta name="description" content="An awesome description"\>
*
 * @param string $url The URL of the page that should be scraped
- * @param bool $no_guessing If true the parse doens't search for
- * preview pictures
* @param bool $do_oembed The false option is used by the function fetch_oembed()
* to avoid endless loops
* @param int $count Internal counter to avoid endless loops
* string 'type' => Content type (error, link, photo, image, audio, video)
* string 'title' => (optional) The title of the content
* string 'text' => (optional) The description for the content
- * string 'image' => (optional) A preview image of the content (only available if $no_guessing = false)
+ * string 'image' => (optional) A preview image of the content
* array 'images' => (optional) Array of preview pictures
* string 'keywords' => (optional) The tags which belong to the content
*
* </body>
* @endverbatim
*/
- public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1)
+ public static function getSiteinfo($url, $do_oembed = true, $count = 1)
{
if (empty($url)) {
return [
}
}
if ($content != '') {
- $siteinfo = self::getSiteinfo($content, $no_guessing, $do_oembed, ++$count);
+ $siteinfo = self::getSiteinfo($content, $do_oembed, ++$count);
return $siteinfo;
}
}