<?php
-
/**
* @file include/ParseUrl.php
* @brief Get information about a given URL
*/
-
namespace Friendica;
use Friendica\Core\Config;
+use Friendica\Object\Image;
+use Friendica\Util\XML;
-use xml;
use dba;
-
use DomXPath;
use DOMDocument;
-require_once("include/network.php");
-require_once("include/Photo.php");
-require_once("include/oembed.php");
-require_once("include/xml.php");
+require_once "include/network.php";
+require_once "include/oembed.php";
/**
* @brief Class with methods for extracting certain content from an url
*/
-class ParseUrl {
-
+class ParseUrl
+{
/**
* @brief Search for cached embeddable data of an url otherwise fetch it
*
- * @param type $url The url of the page which should be scraped
+ * @param type $url The url of the page which should be scraped
* @param type $no_guessing If true the parser doesn't search for
- * preview pictures
- * @param type $do_oembed The false option is used by the function fetch_oembed()
- * to avoid endless loops
+ * preview pictures
+ * @param type $do_oembed The false option is used by the function fetch_oembed()
+ * to avoid endless loops
*
* @return array which contains needed data for embedding
* string 'url' => The url of the parsed page
* @see ParseUrl::getSiteinfo() for more information about scraping
* embeddable content
*/
- public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true) {
-
+ public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true)
+ {
if ($url == "") {
return false;
}
- $r = q("SELECT * FROM `parsed_url` WHERE `url` = '%s' AND `guessing` = %d AND `oembed` = %d",
- dbesc(normalise_link($url)), intval(!$no_guessing), intval($do_oembed));
+ $r = q(
+ "SELECT * FROM `parsed_url` WHERE `url` = '%s' AND `guessing` = %d AND `oembed` = %d",
+ dbesc(normalise_link($url)),
+ intval(!$no_guessing),
+ intval($do_oembed)
+ );
if ($r) {
$data = $r[0]["content"];
$data = self::getSiteinfo($url, $no_guessing, $do_oembed);
- dba::insert('parsed_url', array('url' => normalise_link($url), 'guessing' => !$no_guessing,
+ dba::insert(
+ 'parsed_url',
+ array(
+ 'url' => normalise_link($url), 'guessing' => !$no_guessing,
'oembed' => $do_oembed, 'content' => serialize($data),
- 'created' => datetime_convert()), true);
+ 'created' => datetime_convert()),
+ true
+ );
return $data;
}
* like \<title\>Awesome Title\</title\> or
* \<meta name="description" content="An awesome description"\>
*
- * @param type $url The url of the page which should be scraped
+ * @param type $url The url of the page which should be scraped
* @param type $no_guessing If true the parser doesn't search for
- * preview pictures
- * @param type $do_oembed The false option is used by the function fetch_oembed()
- * to avoid endless loops
- * @param type $count Internal counter to avoid endless loops
+ * preview pictures
+ * @param type $do_oembed The false option is used by the function fetch_oembed()
+ * to avoid endless loops
+ * @param type $count Internal counter to avoid endless loops
*
* @return array which contains needed data for embedding
* string 'url' => The url of the parsed page
* </body>
* @endverbatim
*/
- public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1) {
-
+ public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1)
+ {
$a = get_app();
$siteinfo = array();
$body = $data["body"];
if ($do_oembed) {
-
$oembed_data = oembed_fetch_url($url);
- if (!in_array($oembed_data->type, array("error", "rich"))) {
+ if (!in_array($oembed_data->type, array("error", "rich", ""))) {
$siteinfo["type"] = $oembed_data->type;
}
if (($oembed_data->type == "link") && ($siteinfo["type"] != "photo")) {
if (isset($oembed_data->title)) {
- $siteinfo["title"] = $oembed_data->title;
+ $siteinfo["title"] = trim($oembed_data->title);
}
if (isset($oembed_data->description)) {
$siteinfo["text"] = trim($oembed_data->description);
$doc = new DOMDocument();
@$doc->loadHTML($body);
- xml::deleteNode($doc, "style");
- xml::deleteNode($doc, "script");
- xml::deleteNode($doc, "option");
- xml::deleteNode($doc, "h1");
- xml::deleteNode($doc, "h2");
- xml::deleteNode($doc, "h3");
- xml::deleteNode($doc, "h4");
- xml::deleteNode($doc, "h5");
- xml::deleteNode($doc, "h6");
- xml::deleteNode($doc, "ol");
- xml::deleteNode($doc, "ul");
+ XML::deleteNode($doc, "style");
+ XML::deleteNode($doc, "script");
+ XML::deleteNode($doc, "option");
+ XML::deleteNode($doc, "h1");
+ XML::deleteNode($doc, "h2");
+ XML::deleteNode($doc, "h3");
+ XML::deleteNode($doc, "h4");
+ XML::deleteNode($doc, "h5");
+ XML::deleteNode($doc, "h6");
+ XML::deleteNode($doc, "ol");
+ XML::deleteNode($doc, "ul");
$xpath = new DomXPath($doc);
$list = $xpath->query("//title");
if ($list->length > 0) {
- $siteinfo["title"] = $list->item(0)->nodeValue;
+ $siteinfo["title"] = trim($list->item(0)->nodeValue);
}
//$list = $xpath->query("head/meta[@name]");
if ($attr["content"] != "") {
switch (strtolower($attr["name"])) {
case "fulltitle":
- $siteinfo["title"] = $attr["content"];
+ $siteinfo["title"] = trim($attr["content"]);
break;
case "description":
- $siteinfo["text"] = $attr["content"];
+ $siteinfo["text"] = trim($attr["content"]);
break;
case "thumbnail":
$siteinfo["image"] = $attr["content"];
}
break;
case "twitter:description":
- $siteinfo["text"] = $attr["content"];
+ $siteinfo["text"] = trim($attr["content"]);
break;
case "twitter:title":
- $siteinfo["title"] = $attr["content"];
+ $siteinfo["title"] = trim($attr["content"]);
break;
case "dc.title":
- $siteinfo["title"] = $attr["content"];
+ $siteinfo["title"] = trim($attr["content"]);
break;
case "dc.description":
- $siteinfo["text"] = $attr["content"];
+ $siteinfo["text"] = trim($attr["content"]);
break;
case "keywords":
$keywords = explode(",", $attr["content"]);
$siteinfo["image"] = $attr["content"];
break;
case "og:title":
- $siteinfo["title"] = $attr["content"];
+ $siteinfo["title"] = trim($attr["content"]);
break;
case "og:description":
- $siteinfo["text"] = $attr["content"];
+ $siteinfo["text"] = trim($attr["content"]);
break;
}
}
}
$src = self::completeUrl($attr["src"], $url);
- $photodata = get_photo_info($src);
+ $photodata = Image::getInfoFromURL($src);
if (($photodata) && ($photodata[0] > 150) && ($photodata[1] > 150)) {
if ($photodata[0] > 300) {
"width" => $photodata[0],
"height" => $photodata[1]);
}
-
- }
+ }
} elseif ($siteinfo["image"] != "") {
$src = self::completeUrl($siteinfo["image"], $url);
unset($siteinfo["image"]);
- $photodata = get_photo_info($src);
+ $photodata = Image::getInfoFromURL($src);
if (($photodata) && ($photodata[0] > 10) && ($photodata[1] > 10)) {
$siteinfo["images"][] = array("src" => $src,
* @param string $string Tags
* @return array with formatted Hashtags
*/
- public static function convertTagsToArray($string) {
+ public static function convertTagsToArray($string)
+ {
$arr_tags = str_getcsv($string);
if (count($arr_tags)) {
// add the # sign to every tag
* This method is used as callback function
*
* @param string $tag The pure tag name
- * @param int $k Counter for internal use
+ * @param int $k Counter for internal use
+ * @return void
*/
- private static function arrAddHashes(&$tag, $k) {
+ private static function arrAddHashes(&$tag, $k)
+ {
$tag = "#" . $tag;
}
* can miss the scheme so we need to add the correct
* scheme
*
- * @param string $url The url which possibly does have
- * a missing scheme (a link to an image)
+ * @param string $url The url which possibly does have
+ * a missing scheme (a link to an image)
* @param string $scheme The url with a correct scheme
- * (e.g. the url from the webpage which does contain the image)
+ * (e.g. the url from the webpage which does contain the image)
*
* @return string The url with a scheme
*/
- private static function completeUrl($url, $scheme) {
+ private static function completeUrl($url, $scheme)
+ {
$urlarr = parse_url($url);
// If the url already has a scheme
$complete .= ":".$schemearr["port"];
}
- if (strpos($urlarr["path"],"/") !== 0) {
+ if (strpos($urlarr["path"], "/") !== 0) {
$complete .= "/";
}