3 * @file src/Util/ParseUrl.php
 * @brief Get information about a given URL
6 namespace Friendica\Util;
8 use Friendica\Content\OEmbed;
9 use Friendica\Core\Addon;
10 use Friendica\Object\Image;
11 use Friendica\Util\Network;
12 use Friendica\Util\XML;
18 require_once 'include/dba.php';
21 * @brief Class with methods for extracting certain content from an url
 * @brief Search for cached embeddable data of a URL, otherwise fetch it
28 * @param string $url The url of the page which should be scraped
 * @param bool $no_guessing If true the parser doesn't search for
31 * @param bool $do_oembed The false option is used by the function fetch_oembed()
32 * to avoid endless loops
34 * @return array which contains needed data for embedding
35 * string 'url' => The url of the parsed page
36 * string 'type' => Content type
37 * string 'title' => The title of the content
38 * string 'text' => The description for the content
39 * string 'image' => A preview image of the content (only available
 *    if $no_guessing = false)
 *    array 'images' => Array of preview pictures
42 * string 'keywords' => The tags which belong to the content
44 * @see ParseUrl::getSiteinfo() for more information about scraping
public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true)
	// Look up a previously scraped result; the cache key is the normalised
	// URL plus both option flags, so every option combination is cached
	// separately.
	$parsed_url = dba::selectFirst('parsed_url', ['content'],
		['url' => normalise_link($url), 'guessing' => !$no_guessing, 'oembed' => $do_oembed]
	// Cache hit: the stored content is the serialized siteinfo array.
	// NOTE(review): unserialize() is only safe here as long as this row is
	// written exclusively by the serialize() call below — confirm no other
	// writer exists for the parsed_url table.
	if (!empty($parsed_url['content'])) {
		$data = unserialize($parsed_url['content']);
	// Cache miss: scrape the page now ...
	$data = self::getSiteinfo($url, $no_guessing, $do_oembed);
	// ... and store the result together with the cache key fields and a timestamp.
		'url' => normalise_link($url), 'guessing' => !$no_guessing,
		'oembed' => $do_oembed, 'content' => serialize($data),
		'created' => DateTimeFormat::utcNow()
76 * @brief Parse a page for embeddable content information
 * This method parses the url for meta data which can be used to embed
79 * the content. If available it prioritizes Open Graph meta tags.
80 * If this is not available it uses the twitter cards meta tags.
 * As fallback it uses standard html elements with meta information
82 * like \<title\>Awesome Title\</title\> or
83 * \<meta name="description" content="An awesome description"\>
85 * @param string $url The url of the page which should be scraped
 * @param bool $no_guessing If true the parser doesn't search for
88 * @param bool $do_oembed The false option is used by the function fetch_oembed()
89 * to avoid endless loops
90 * @param int $count Internal counter to avoid endless loops
92 * @return array which contains needed data for embedding
93 * string 'url' => The url of the parsed page
94 * string 'type' => Content type
95 * string 'title' => The title of the content
96 * string 'text' => The description for the content
97 * string 'image' => A preview image of the content (only available
 *    if $no_guessing = false)
 *    array 'images' => Array of preview pictures
100 * string 'keywords' => The tags which belong to the content
102 * @todo https://developers.google.com/+/plugins/snippet/
104 * <meta itemprop="name" content="Awesome title">
105 * <meta itemprop="description" content="An awesome description">
106 * <meta itemprop="image" content="http://maple.libertreeproject.org/images/tree-icon.png">
108 * <body itemscope itemtype="http://schema.org/Product">
109 * <h1 itemprop="name">Shiny Trinket</h1>
110 * <img itemprop="image" src="{image-url}" />
111 * <p itemprop="description">Shiny trinkets are shiny.</p>
public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1)
	// Check if the URL does contain a scheme; if not, assume plain http.
	$scheme = parse_url($url, PHP_URL_SCHEME);
	$url = "http://".trim($url, "/");
	// Recursion guard: meta-refresh redirects re-enter this method with ++$count.
	logger("parseurl_getsiteinfo: Endless loop detected for ".$url, LOGGER_DEBUG);
	// Strip stray quoting around the URL, then drop known tracking parameters.
	$url = trim($url, "'");
	$url = trim($url, '"');
	$url = Network::stripTrackingQueryParams($url);
	$siteinfo["url"] = $url;
	$siteinfo["type"] = "link"; // default; refined below from oembed/meta data
	$data = Network::curl($url);
	if (!$data['success']) {
	// If the file is too large then exit
	if ($data["info"]["download_content_length"] > 1000000) {
	// If it isn't a HTML file then exit
	if (($data["info"]["content_type"] != "") && !strstr(strtolower($data["info"]["content_type"]), "html")) {
	$header = $data["header"];
	$body = $data["body"];
	// Prefer oembed data when the provider offers it ($do_oembed is passed
	// as false by fetch_oembed() to avoid recursing back into it).
	$oembed_data = OEmbed::fetchURL($url);
	if (!empty($oembed_data->type)) {
		// "error", "rich" and empty types are deliberately ignored.
		if (!in_array($oembed_data->type, ["error", "rich", ""])) {
			$siteinfo["type"] = $oembed_data->type;
		// For plain links take title/description/thumbnail from oembed,
		// unless the page was already classified as a photo.
		if (($oembed_data->type == "link") && ($siteinfo["type"] != "photo")) {
			if (isset($oembed_data->title)) {
				$siteinfo["title"] = trim($oembed_data->title);
			if (isset($oembed_data->description)) {
				$siteinfo["text"] = trim($oembed_data->description);
			if (isset($oembed_data->thumbnail_url)) {
				$siteinfo["image"] = $oembed_data->thumbnail_url;
	// Fetch the first mentioned charset. Can be in body or header
	if (preg_match('/charset=(.*?)['."'".'"\s\n]/', $header, $matches)) {
		$charset = trim(trim(trim(array_pop($matches)), ';,'));
	if ($charset == "") {
	// Recode anything that is not already UTF-8.
	if (($charset != "") && (strtoupper($charset) != "UTF-8")) {
		logger("parseurl_getsiteinfo: detected charset ".$charset, LOGGER_DEBUG);
		//$body = mb_convert_encoding($body, "UTF-8", $charset);
		$body = iconv($charset, "UTF-8//TRANSLIT", $body);
	// Entity-encode so DOMDocument (which defaults to ISO-8859-1) parses safely.
	$body = mb_convert_encoding($body, 'HTML-ENTITIES', "UTF-8");
	$doc = new DOMDocument();
	@$doc->loadHTML($body); // @: real-world HTML routinely triggers parser warnings
	// Remove nodes whose text would pollute the guessed description below.
	XML::deleteNode($doc, "style");
	XML::deleteNode($doc, "script");
	XML::deleteNode($doc, "option");
	XML::deleteNode($doc, "h1");
	XML::deleteNode($doc, "h2");
	XML::deleteNode($doc, "h3");
	XML::deleteNode($doc, "h4");
	XML::deleteNode($doc, "h5");
	XML::deleteNode($doc, "h6");
	XML::deleteNode($doc, "ol");
	XML::deleteNode($doc, "ul");
	$xpath = new DOMXPath($doc);
	// Follow <meta http-equiv="refresh" content="…;url=…"> client-side redirects.
	$list = $xpath->query("//meta[@content]");
	foreach ($list as $node) {
		if ($node->attributes->length) {
			foreach ($node->attributes as $attribute) {
				$attr[$attribute->name] = $attribute->value;
		if (@$attr["http-equiv"] == "refresh") {
			$path = $attr["content"];
			$pathinfo = explode(";", $path);
			foreach ($pathinfo as $value) {
				if (substr(strtolower($value), 0, 4) == "url=") {
					$content = substr($value, 4);
			if ($content != "") {
				// Recurse into the redirect target; $count stops loops.
				$siteinfo = self::getSiteinfo($content, $no_guessing, $do_oembed, ++$count);
	// Fallback title from the <title> element.
	$list = $xpath->query("//title");
	if ($list->length > 0) {
		$siteinfo["title"] = trim($list->item(0)->nodeValue);
	//$list = $xpath->query("head/meta[@name]");
	// Twitter cards and classic meta tags (<meta name="…" content="…">).
	$list = $xpath->query("//meta[@name]");
	foreach ($list as $node) {
		if ($node->attributes->length) {
			foreach ($node->attributes as $attribute) {
				$attr[$attribute->name] = $attribute->value;
		if (!empty($attr["content"])) {
			$attr["content"] = trim(html_entity_decode($attr["content"], ENT_QUOTES, "UTF-8"));
			switch (strtolower($attr["name"])) {
					$siteinfo["title"] = trim($attr["content"]);
					$siteinfo["text"] = trim($attr["content"]);
					$siteinfo["image"] = $attr["content"];
				case "twitter:image":
					$siteinfo["image"] = $attr["content"];
				case "twitter:image:src":
					$siteinfo["image"] = $attr["content"];
					// Only set the type when nothing better was found
					// yet, or when the card is explicitly a photo.
					if (($siteinfo["type"] == "") || ($attr["content"] == "photo")) {
						$siteinfo["type"] = $attr["content"];
				case "twitter:description":
					$siteinfo["text"] = trim($attr["content"]);
				case "twitter:title":
					$siteinfo["title"] = trim($attr["content"]);
					$siteinfo["title"] = trim($attr["content"]);
				case "dc.description":
					$siteinfo["text"] = trim($attr["content"]);
					$keywords = explode(",", $attr["content"]);
				case "news_keywords":
					$keywords = explode(",", $attr["content"]);
	// A twitter:card of "summary" is just a generic link for our purposes.
	if ($siteinfo["type"] == "summary") {
		$siteinfo["type"] = "link";
	// De-duplicate and trim the collected keywords.
	if (isset($keywords)) {
		$siteinfo["keywords"] = [];
		foreach ($keywords as $keyword) {
			if (!in_array(trim($keyword), $siteinfo["keywords"])) {
				$siteinfo["keywords"][] = trim($keyword);
	//$list = $xpath->query("head/meta[@property]");
	// Open Graph tags override what was collected so far.
	$list = $xpath->query("//meta[@property]");
	foreach ($list as $node) {
		if ($node->attributes->length) {
			foreach ($node->attributes as $attribute) {
				$attr[$attribute->name] = $attribute->value;
		if (!empty($attr["content"])) {
			$attr["content"] = trim(html_entity_decode($attr["content"], ENT_QUOTES, "UTF-8"));
			switch (strtolower($attr["property"])) {
					$siteinfo["image"] = $attr["content"];
					$siteinfo["title"] = trim($attr["content"]);
				case "og:description":
					$siteinfo["text"] = trim($attr["content"]);
	// No preview image found yet: optionally guess one from the page images.
	if ((@$siteinfo["image"] == "") && !$no_guessing) {
		$list = $xpath->query("//img[@src]");
		foreach ($list as $node) {
			if ($node->attributes->length) {
				foreach ($node->attributes as $attribute) {
					$attr[$attribute->name] = $attribute->value;
			$src = self::completeUrl($attr["src"], $url);
			// NOTE(review): getInfoFromURL() fetches each candidate image,
			// which can be slow on image-heavy pages.
			$photodata = Image::getInfoFromURL($src);
			// Keep only images larger than 150x150, scaled down so the
			// longer side is at most 300px.
			if (($photodata) && ($photodata[0] > 150) && ($photodata[1] > 150)) {
				if ($photodata[0] > 300) {
					$photodata[1] = round($photodata[1] * (300 / $photodata[0]));
				if ($photodata[1] > 300) {
					$photodata[0] = round($photodata[0] * (300 / $photodata[1]));
				$siteinfo["images"][] = ["src" => $src,
					"width" => $photodata[0],
					"height" => $photodata[1]];
	} elseif (!empty($siteinfo["image"])) {
		// A meta/oembed image was found: resolve it against the page URL
		// and move it into the "images" list (dropping tiny pictures).
		$src = self::completeUrl($siteinfo["image"], $url);
		unset($siteinfo["image"]);
		$photodata = Image::getInfoFromURL($src);
		if (($photodata) && ($photodata[0] > 10) && ($photodata[1] > 10)) {
			$siteinfo["images"][] = ["src" => $src,
				"width" => $photodata[0],
				"height" => $photodata[1]];
	// No description found: optionally guess one from the page text,
	// preferring obvious article/content containers.
	if ((@$siteinfo["text"] == "") && (@$siteinfo["title"] != "") && !$no_guessing) {
		$list = $xpath->query("//div[@class='article']");
		foreach ($list as $node) {
			if (strlen($node->nodeValue) > 40) {
				$text .= " ".trim($node->nodeValue);
		$list = $xpath->query("//div[@class='content']");
		foreach ($list as $node) {
			if (strlen($node->nodeValue) > 40) {
				$text .= " ".trim($node->nodeValue);
		// If no text was found then take the paragraph content
		$list = $xpath->query("//p");
		foreach ($list as $node) {
			if (strlen($node->nodeValue) > 40) {
				$text .= " ".trim($node->nodeValue);
		// Normalise whitespace, then truncate to a 350-character teaser.
		$text = trim(str_replace(["\n", "\r"], [" ", " "], $text));
		// NOTE(review): this loop is meant to collapse runs of spaces —
		// confirm the strpos/str_replace needle is a double space ("  ");
		// with a single space the replacement is a no-op and the loop
		// would never terminate for any text containing a space.
		while (strpos($text, " ")) {
			$text = trim(str_replace(" ", " ", $text));
		$siteinfo["text"] = trim(html_entity_decode(substr($text, 0, 350), ENT_QUOTES, "UTF-8").'...');
	logger("parseurl_getsiteinfo: Siteinfo for ".$url." ".print_r($siteinfo, true), LOGGER_DEBUG);
	// Give addons a chance to amend the result before it is returned/cached.
	Addon::callHooks("getsiteinfo", $siteinfo);
430 * @brief Convert tags from CSV to an array
432 * @param string $string Tags
433 * @return array with formatted Hashtags
public static function convertTagsToArray($string)
	// str_getcsv() copes with quoted tags that may themselves contain commas.
	$arr_tags = str_getcsv($string);
	if (count($arr_tags)) {
		// add the # sign to every tag
		array_walk($arr_tags, ["self", "arrAddHashes"]);
 * @brief Add a hash sign to a string
449 * This method is used as callback function
451 * @param string $tag The pure tag name
452 * @param int $k Counter for internal use
// Prepend "#" to a single tag; used as the array_walk() callback in
// convertTagsToArray(). $k receives the array key as required by the
// callback signature (presumably unused — body not visible here, confirm).
private static function arrAddHashes(&$tag, $k)
 * @brief Add a scheme to a URL
463 * The src attribute of some html elements (e.g. images)
464 * can miss the scheme so we need to add the correct
467 * @param string $url The url which possibly does have
468 * a missing scheme (a link to an image)
469 * @param string $scheme The url with a correct scheme
470 * (e.g. the url from the webpage which does contain the image)
472 * @return string The url with a scheme
private static function completeUrl($url, $scheme)
	$urlarr = parse_url($url);
	// If the url does already have a scheme
	// we can stop the process here
	if (isset($urlarr["scheme"])) {
	// Otherwise rebuild an absolute URL from the parts of the reference
	// URL ($scheme) and the parts of the relative one ($url).
	$schemearr = parse_url($scheme);
	$complete = $schemearr["scheme"]."://".$schemearr["host"];
	// Keep a non-default port of the reference URL.
	if (@$schemearr["port"] != "") {
		$complete .= ":".$schemearr["port"];
	// NOTE(review): a path without a leading "/" is resolved relative to
	// the site root here, not to the directory of the reference URL —
	// confirm that is the intended behaviour for relative img src values.
	if (strpos($urlarr["path"], "/") !== 0) {
	$complete .= $urlarr["path"];
	// Carry over query string and fragment of the (formerly relative) URL.
	if (@$urlarr["query"] != "") {
		$complete .= "?".$urlarr["query"];
	if (@$urlarr["fragment"] != "") {
		$complete .= "#".$urlarr["fragment"];