Clean up channel feed

pull/107/head
Omar Roth 6 years ago
parent 9fbab6125a
commit 09d4d46fcd

@@ -1279,33 +1279,31 @@ get "/feed/channel/:ucid" do |env|
   ucid = env.params.url["ucid"]
 
   client = make_client(YT_URL)
   if !ucid.match(/UC[a-zA-Z0-9_-]{22}/)
-    rss = client.get("/feeds/videos.xml?user=#{ucid}").body
-    rss = XML.parse_html(rss)
+    rss = client.get("/feeds/videos.xml?user=#{ucid}")
+    rss = XML.parse_html(rss.body)
 
     ucid = rss.xpath_node("//feed/channelid")
-    if ucid
-      ucid = ucid.content
-    else
-      env.response.content_type = "application/json"
-      next {"error" => "User does not exist"}.to_json
+    if !ucid
+      error_message = "User does not exist."
+      halt env, status_code: 404, response: error_message
     end
+
+    next env.redirect "/channel/#{ucid}"
   end
 
   url = produce_videos_url(ucid)
   response = client.get(url)
-
-  channel = get_channel(ucid, client, PG_DB, pull_all_videos: false)
-
-  json = JSON.parse(response.body)
-  if !json["content_html"]? || json["content_html"].as_s.empty?
-    error_message = "This channel does not exist or has no videos."
-    next templated "error"
+  response = JSON.parse(response.body)
+
+  if !response["content_html"]?
+    error_message = "This channel does not exist."
+    halt env, status_code: 404, response: error_message
   end
 
-  content_html = json["content_html"].as_s
+  content_html = response["content_html"].as_s
   document = XML.parse_html(content_html)
 
+  channel = get_channel(ucid, client, PG_DB, pull_all_videos: false)
+
   host_url = make_host_url(Kemal.config.ssl || CONFIG.https_only, env.request.headers["Host"]?)
   path = env.request.path
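For context, the legacy-username branch above resolves a plain username to a UC channel id by fetching YouTube's own RSS feed for that user and reading the channel id out of it. A standalone sketch of that lookup, with a made-up username and only the calls already used in the diff, might look like:

    require "http/client"
    require "xml"

    # Placeholder username; the real route takes it from the request URL.
    username = "someuser"

    client = make_client = HTTP::Client.new(URI.parse("https://www.youtube.com"))
    rss = client.get("/feeds/videos.xml?user=#{username}")
    rss = XML.parse_html(rss.body)

    if channel_id = rss.xpath_node("//feed/channelid")
      puts "Resolved UCID: #{channel_id.content}"
    else
      puts "User does not exist."
    end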
@@ -1323,77 +1321,30 @@ get "/feed/channel/:ucid" do |env|
         xml.element("uri") { xml.text "#{host_url}/channel/#{ucid}" }
       end
 
-      document.xpath_nodes(%q(//li[contains(@class, "feed-item-container")])).each do |node|
-        anchor = node.xpath_node(%q(.//h3[contains(@class,"yt-lockup-title")]/a))
-        if !anchor
-          next
-        end
-
-        if anchor["href"].starts_with? "https://www.googleadservices.com"
-          next
-        end
-
-        title = anchor.content.strip
-        video_id = anchor["href"].lchop("/watch?v=")
-
-        metadata = node.xpath_nodes(%q(.//div[contains(@class,"yt-lockup-meta")]/ul/li))
-        if metadata.size == 0
-          next
-        elsif metadata.size == 1
-          view_count = metadata[0].content.split(" ")[0].delete(",").to_i64
-          published = Time.now
-        else
-          published = decode_date(metadata[0].content)
-
-          view_count = metadata[1].content.split(" ")[0]
-          if view_count == "No"
-            view_count = 0_i64
-          else
-            view_count = view_count.delete(",").to_i64
-          end
-        end
-
-        description_html = node.xpath_node(%q(.//div[contains(@class, "yt-lockup-description")]))
-        if !description_html
-          description = ""
-          description_html = ""
-        else
-          description_html = description_html.to_s
-          description = description_html.gsub("<br>", "\n")
-          description = description.gsub("<br/>", "\n")
-          description = XML.parse_html(description).content.strip("\n ")
-        end
-
-        length_seconds = node.xpath_node(%q(.//span[@class="video-time"]))
-        if length_seconds
-          length_seconds = decode_length_seconds(length_seconds.content)
-        else
-          length_seconds = -1
-        end
-
+      extract_channel_videos(document, channel.author, ucid).each do |video|
         xml.element("entry") do
-          xml.element("id") { xml.text "yt:video:#{video_id}" }
-          xml.element("yt:videoId") { xml.text video_id }
+          xml.element("id") { xml.text "yt:video:#{video.id}" }
+          xml.element("yt:videoId") { xml.text video.id }
           xml.element("yt:channelId") { xml.text ucid }
-          xml.element("title") { xml.text title }
-          xml.element("link", rel: "alternate", href: "#{host_url}/watch?v=#{video_id}")
+          xml.element("title") { xml.text video.title }
+          xml.element("link", rel: "alternate", href: "#{host_url}/watch?v=#{video.id}")
 
           xml.element("author") do
             xml.element("name") { xml.text channel.author }
             xml.element("uri") { xml.text "#{host_url}/channel/#{ucid}" }
           end
 
-          xml.element("published") { xml.text published.to_s("%Y-%m-%dT%H:%M:%S%:z") }
+          xml.element("published") { xml.text video.published.to_s("%Y-%m-%dT%H:%M:%S%:z") }
 
           xml.element("media:group") do
-            xml.element("media:title") { xml.text title }
-            xml.element("media:thumbnail", url: "https://i.ytimg.com/vi/#{video_id}/hqdefault.jpg",
-              width: "480", height: "360")
-            xml.element("media:description") { xml.text description }
+            xml.element("media:title") { xml.text video.title }
+            xml.element("media:thumbnail", url: "https://i.ytimg.com/vi/#{video.id}/mqdefault.jpg",
+              width: "320", height: "180")
+            xml.element("media:description") { xml.text video.description }
           end
 
           xml.element("media:community") do
-            xml.element("media:statistics", views: view_count)
+            xml.element("media:statistics", views: video.views)
           end
         end
       end
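With this change, consumers still receive the same Atom/media-RSS document, but a missing channel now yields a 404 with a plain error body instead of a rendered error page. A hypothetical client-side check (the instance host and UCID below are made-up placeholders) could be:

    require "http/client"

    # Placeholder instance and channel id.
    response = HTTP::Client.get("https://invidious.example.com/feed/channel/UCxxxxxxxxxxxxxxxxxxxxxx")

    if response.status_code == 200
      puts response.body[0, 200] # start of the Atom feed
    else
      puts "#{response.status_code}: #{response.body}" # e.g. 404: This channel does not exist.
    end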
@@ -1492,8 +1443,8 @@ get "/feed/private" do |env|
 
           xml.element("media:group") do
             xml.element("media:title") { xml.text video.title }
-            xml.element("media:thumbnail", url: "https://i.ytimg.com/vi/#{video.id}/hqdefault.jpg",
-              width: "480", height: "360")
+            xml.element("media:thumbnail", url: "https://i.ytimg.com/vi/#{video.id}/mqdefault.jpg",
+              width: "320", height: "180")
           end
         end
       end

@@ -130,3 +130,69 @@ def fetch_channel(ucid, client, db, pull_all_videos = true)
 
   return channel
 end
+
+def extract_channel_videos(document, author, ucid)
+  channel_videos = [] of Video
+
+  document.xpath_nodes(%q(//li[contains(@class, "feed-item-container")])).each do |node|
+    anchor = node.xpath_node(%q(.//h3[contains(@class,"yt-lockup-title")]/a))
+    if !anchor
+      next
+    end
+
+    if anchor["href"].starts_with? "https://www.googleadservices.com"
+      next
+    end
+
+    title = anchor.content.strip
+    id = anchor["href"].lchop("/watch?v=")
+
+    metadata = node.xpath_nodes(%q(.//div[contains(@class,"yt-lockup-meta")]/ul/li))
+    if metadata.size == 0
+      next
+    elsif metadata.size == 1
+      view_count = metadata[0].content.split(" ")[0].delete(",").to_i64
+      published = Time.now
+    else
+      published = decode_date(metadata[0].content)
+
+      view_count = metadata[1].content.split(" ")[0]
+      if view_count == "No"
+        view_count = 0_i64
+      else
+        view_count = view_count.delete(",").to_i64
+      end
+    end
+
+    description_html = node.xpath_node(%q(.//div[contains(@class, "yt-lockup-description")]))
+    description = description_html_to_description(description_html)
+
+    length_seconds = node.xpath_node(%q(.//span[@class="video-time"]))
+    if length_seconds
+      length_seconds = decode_length_seconds(length_seconds.content)
+    else
+      length_seconds = -1
+    end
+
+    info = HTTP::Params.parse("length_seconds=#{length_seconds}")
+
+    channel_videos << Video.new(
+      id,
+      info,
+      Time.now,
+      title,
+      view_count,
+      0,            # Like count
+      0,            # Dislike count
+      0.0,          # Wilson score
+      published,
+      description,
+      "",           # Language
+      author,
+      ucid,
+      [] of String, # Allowed regions
+      true,         # Is family friendly
+      ""            # Genre
+    )
+  end
+
+  return channel_videos
+end
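The new helper gathers the per-item scraping that used to live inline in the feed route and returns plain Video structs, with only the fields that can be scraped from the channel grid HTML filled in. A rough usage sketch, mirroring how the route above calls it (the HTML fragment, author string, and UCID are placeholder values):

    # Sketch only: in the route, content_html comes from the JSON returned by
    # produce_videos_url(ucid); here it is just an empty placeholder item.
    content_html = %(<li class="feed-item-container"></li>)

    document = XML.parse_html(content_html)
    videos = extract_channel_videos(document, "Example Author", "UCxxxxxxxxxxxxxxxxxxxxxx")

    videos.each do |video|
      # id, title, views, published, description, and the length_seconds stuffed
      # into `info` carry scraped data; likes, dislikes, Wilson score, etc. are stubbed.
      puts "#{video.id}: #{video.title} (#{video.views} views, published #{video.published})"
    end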

@@ -272,3 +272,17 @@ def generate_captcha(key)
 
   return {challenge: challenge, token: token}
 end
+
+def description_html_to_description(description_html)
+  if !description_html
+    description = ""
+    description_html = ""
+  else
+    description_html = description_html.to_s
+    description = description_html.gsub("<br>", "\n")
+    description = description.gsub("<br/>", "\n")
+    description = XML.parse_html(description).content.strip("\n ")
+  end
+
+  return description
+end
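description_html_to_description is the same nil-or-markup handling that was previously inlined in the feed loop: a missing node becomes an empty string, <br>/<br/> become newlines, and remaining tags are stripped by re-parsing. A quick illustration with an arbitrary HTML fragment:

    html = %(<div class="yt-lockup-description">First line<br>Second <b>bold</b> line</div>)
    node = XML.parse_html(html).xpath_node(%q(//div[contains(@class, "yt-lockup-description")]))

    p description_html_to_description(node) # => "First line\nSecond bold line"
    p description_html_to_description(nil)  # => ""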
