-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscript.py
More file actions
83 lines (66 loc) · 2.85 KB
/
script.py
File metadata and controls
83 lines (66 loc) · 2.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import json
import os
import html
import glob
# Function to escape XML special characters for use inside attribute values.
def escape_xml(text):
    """Return *text* with &, <, >, and quotes escaped for XML.

    quote=True is required here: the caller interpolates the result into
    double-quoted XML attributes (text="..."), so an unescaped '"' in a
    channel name would produce malformed OPML.
    """
    return html.escape(text, quote=True)
# Find the JSON file - look for newpipe_subscriptions_*.json files
json_files = glob.glob('newpipe_subscriptions_*.json')
if json_files:
    # Use the most recent export (by modification time)
    json_file = max(json_files, key=os.path.getmtime)
else:
    print("Error: No newpipe_subscriptions_*.json file found!")
    exit(1)

# Load the subscription data from the NewPipe export
with open(json_file, 'r', encoding='utf-8') as file:
    subscriptions = json.load(file)

# Write one RSS feed URL per subscription.
# Opened once in 'w' mode: the original reopened the file in 'a' (append)
# mode for every subscription, so re-running the script accumulated stale
# duplicate lines from previous runs.
with open('urls.txt', 'w', encoding='utf-8') as outfile:
    for sub in subscriptions['subscriptions']:
        # Convert the channel URL into its RSS feed URL
        new_url = sub['url'].replace(
            'https://www.youtube.com/channel/',
            'https://www.youtube.com/feeds/videos.xml?channel_id=')
        outfile.write(f'{new_url}\n')

# Write one channel name per subscription (same 'w'-mode fix as above).
with open('names.txt', 'w', encoding='utf-8') as outfile:
    for sub in subscriptions['subscriptions']:
        outfile.write(f"{sub['name']}\n")
# Creates OPML file
def create_opml_file(urls_file, names_file, output_file):
    """Build an OPML 1.0 file pairing each feed URL with its channel name.

    urls_file   -- path to a text file with one feed URL per line
    names_file  -- path to a text file with one channel name per line
    output_file -- path of the OPML file to write (overwritten)

    Lines are paired positionally with zip(); if the two files have
    different lengths the extra entries are silently dropped.
    """
    # Read inputs into distinct locals -- the original shadowed the
    # *_file parameters with the open file handles.
    with open(urls_file, 'r', encoding='utf-8') as fh:
        feed_urls = [line.strip() for line in fh]
    with open(names_file, 'r', encoding='utf-8') as fh:
        website_names = [line.strip() for line in fh]

    with open(output_file, 'w', encoding='utf-8') as opml_file:
        opml_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        opml_file.write('<opml version="1.0">\n')
        opml_file.write('<head>\n')
        opml_file.write('  <title>YouTube Feed</title>\n')
        opml_file.write('</head>\n')
        opml_file.write('<body>\n')
        for url, name in zip(feed_urls, website_names):
            # quote=True so '"' cannot break out of the attribute value;
            # the URL is now escaped too (a literal '&' in a URL would
            # otherwise make the XML malformed).
            escaped_name = html.escape(name, quote=True)
            escaped_url = html.escape(url, quote=True)
            opml_file.write(
                f'  <outline text="{escaped_name}" title="{escaped_name}"'
                f' type="rss" xmlUrl="{escaped_url}"/>\n')
        opml_file.write('</body>\n')
        opml_file.write('</opml>\n')
# Deletes tmp files
def delete_txt_files():
    """Remove the intermediate files this script created.

    The original removed EVERY *.txt file in the working directory,
    which could destroy unrelated user files. Only the two temp files
    produced earlier (urls.txt, names.txt) are deleted; a missing file
    is not an error.
    """
    for filename in ("urls.txt", "names.txt"):
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass  # already gone -- nothing to clean up
# Script entry point: assemble the OPML from the intermediate text
# files, report success, then remove the temporaries.
if __name__ == "__main__":
    output_file = "feeds.opml"
    create_opml_file("urls.txt", "names.txt", output_file)
    print(f"OPML file '{output_file}' created successfully.")
    delete_txt_files()