This Is A Rare Trick For BBHF Members, 100% Working For Any Paid Course You Want:
Go To
Skillshare
Find your favorite class/course
Copy the link of the Course
Head over to Skillshare
Hecking Site
Paste the Link and click ‘Heck’
Bam, Enjoy!
Reps Appreciated!
Thank you for an enormous share, bluenose10. Thanks for the share. +Max Reps Added
Code:
import requests
from bs4 import BeautifulSoup
from slugify import slugify
from time import sleep
# Fetch the hecker download page for the chosen Skillshare class and
# collect every anchor href on it.
url = 'https://skillshare.heckernohecking.repl.co/717737244'
html = requests.get(url, timeout=30)  # timeout so a dead host can't hang the script
soup = BeautifulSoup(html.text, 'html.parser')

# All anchor hrefs on the page; index 0 is a non-download link, the rest
# are the per-lesson download URLs.
lists = [a['href'] for a in soup.find_all('a', href=True)]
if not lists:
    print("Website is not loading")
else:
    print(len(lists))
    print(lists[1])
    # Each href carries an 8-character prefix before the real URL;
    # show one stripped example. (Guarded by else: indexing an empty
    # list would otherwise raise IndexError when the page fails to load.)
    cool = lists[1]
    print(cool[8:])
# Strip the 8-character prefix from every collected href to get the
# directly downloadable URLs.
dn_list = [link[8:] for link in lists]
# The first anchor on the page is not a lesson download — drop it.
dn_list.pop(0)
print("dn_list has this number of elements: ", len(dn_list))
print(dn_list[1])
# Dump the page's visible text to a scratch file for later cleaning.
# (The original called soup.prettify() and discarded the result — a
# dead statement, removed.)
text_list = [soup.get_text()]
# open file in write mode
with open(r"F:\Premiere Pro\Advanced\Source1.txt", 'w') as fp:
    for item in text_list:
        # write each item on a new line
        fp.write("%s\n" % item)
print('Done')
# So far the page text has been saved to file; now clean it:
# load the file into `data`, strip whitespace from every line into
# `clean_data`, then drop the empty lines into `clean_data_1`.
data = []
clean_data = []
clean_data_1 = []
try:
    # Raw string: the original non-raw path contained invalid escape
    # sequences (\P, \A, \S) that only work by accident in CPython.
    with open(r"F:\Premiere Pro\Advanced\Source1.txt", 'r') as fname:
        data = fname.readlines()
    for x in data:
        print(x.strip())
        val = x.strip()
        clean_data.append(val)
    # Hoisted out of the loop — the original rebuilt this filter on
    # every iteration.
    clean_data_1 = list(filter(''.__ne__, clean_data))
except FileNotFoundError:
    print("File Not Found")
# Delete the first three header lines of the generated text file.
# TODO: replace this positional delete with regex matching for robustness.
del clean_data_1[0:3]
# Persist the cleaned anchor texts, one per line, to the second scratch file.
with open(r"F:\Premiere Pro\Advanced\Source2.txt", 'w') as fp:
    for line in clean_data_1:
        fp.write("%s\n" % line)
# Load the final anchor texts and turn each into a slugified .mp4 filename.
# `with` replaces the original's unclosed file handle (resource leak),
# and the path is a raw string to avoid invalid backslash escapes.
with open(r"F:\Premiere Pro\Advanced\Source2.txt", "r") as txt_file:
    content_list = txt_file.readlines()
file_nams = [slugify(line) + ".mp4" for line in content_list]
print("Printing filenames list and check one item")
print(len(file_nams))
print(type(file_nams[2]))
print(f"The new amended file name is {file_nams[2]}")
print("clean data list length is: ", len(clean_data_1))
# Pair each download URL with its target filename.
dl_lists = list(zip(dn_list, file_nams))
def download(link, name):
    """Download *link* into file *name*.

    Returns True on HTTP 200 with the file written, False otherwise
    (sleeping one second first as a crude back-off before the caller
    moves on to the next file).
    """
    print("Start downloading")
    # Links scraped from the text file may carry a trailing carriage
    # return; strip() is a no-op when it's absent, so no `in` check needed.
    link = link.strip("\r")
    # Timeout added: the original call could hang forever on a stalled server.
    file = requests.get(link, timeout=60)
    if file.status_code != 200:
        sleep(1)
        return False
    # `with` closes the handle even if the write raises (the original
    # leaked it on exception).
    with open(name, "wb") as new_file:
        new_file.write(file.content)
    return True
# Download a slice of the lesson list; by default start at the beginning.
firstnum = 0
lastnum = 20
for num in range(firstnum, lastnum):
    # Each entry pairs a download URL with its target filename.
    fileurl, dlfilename = dl_lists[num]
    download(fileurl, dlfilename)
    print(f"Finished printing {num+1} of {lastnum} files")
print(f"Finished downloading all {lastnum} files")
You can use this Python script to download the videos.
You just have to create two .txt files in your folder, named Source1 and Source2.
Awesome share,
Thank you Kindly