
Commit

Update direct_link_generator.py
weebzone authored Nov 21, 2022
1 parent 4a96069 commit 605a664
Showing 1 changed file with 71 additions and 0 deletions.
71 changes: 71 additions & 0 deletions bot/helper/mirror_utils/download_utils/direct_link_generator.py
@@ -82,13 +82,84 @@ def direct_link_generator(link: str):
return unified(link)
elif is_udrive_link(link):
return udrive(link)
elif is_rock_link(link):
return rock(link)
elif is_try2link_link(link):
return try2link(link)
elif is_ez4_link(link):
return ez4(link)
elif any(x in link for x in fmed_list):
return fembed(link)
elif any(x in link for x in ['sbembed.com', 'watchsb.com', 'streamsb.net', 'sbplay.org']):
return sbembed(link)
else:
raise DirectDownloadLinkException(f'No Direct link function found for {link}')

def rock(url: str) -> str:
    client = cloudscraper.create_scraper(allow_brotli=False)
    # rocklinks.net is served through a different bypass domain than the other mirrors
    if 'rocklinks.net' in url:
        DOMAIN = "https://blog.disheye.com"
    else:
        DOMAIN = "https://go.techyjeeshan.xyz"

    url = url[:-1] if url[-1] == '/' else url
    code = url.split("/")[-1]
    final_url = f"{DOMAIN}/{code}?quelle="

    resp = client.get(final_url)
    soup = BeautifulSoup(resp.content, "html.parser")

    try:
        inputs = soup.find(id="go-link").find_all(name="input")
    except AttributeError:
        return "Incorrect Link"

    # Collect the hidden form fields required by the bypass endpoint
    data = {inp.get('name'): inp.get('value') for inp in inputs}
    headers = {"x-requested-with": "XMLHttpRequest"}

    # The site enforces a countdown before the form may be submitted
    time.sleep(10)
    r = client.post(f"{DOMAIN}/links/go", data=data, headers=headers)
    try:
        return r.json()['url']
    except Exception:
        return "Something went wrong :("

def try2link(url):
    client = create_scraper()

    url = url[:-1] if url[-1] == '/' else url

    # 'd' is an expiry-style timestamp a few minutes in the future
    params = {'d': int(time.time()) + (60 * 4)}
    r = client.get(url, params=params, headers={'Referer': 'https://newforex.online/'})

    soup = BeautifulSoup(r.text, 'html.parser')
    inputs = soup.find_all("input")
    data = {inp.get('name'): inp.get('value') for inp in inputs}

    # Wait out the on-page countdown before posting the hidden form fields
    time.sleep(7)
    headers = {
        'Host': 'try2link.com',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'https://try2link.com',
        'Referer': url
    }

    bypassed_url = client.post('https://try2link.com/links/go', headers=headers, data=data)
    return bypassed_url.json()["url"]

def ez4(url):
    client = cloudscraper.create_scraper(allow_brotli=False)
    DOMAIN = "https://ez4short.com"
    headers = {"referer": "https://techmody.io/"}

    resp = client.get(url, headers=headers)
    soup = BeautifulSoup(resp.content, "html.parser")
    inputs = soup.find_all("input")
    # Collect the hidden form fields required by the bypass endpoint
    data = {inp.get('name'): inp.get('value') for inp in inputs}

    # Wait out the on-page countdown, then submit as an XHR request
    headers = {"x-requested-with": "XMLHttpRequest"}
    time.sleep(8)
    r = client.post(f"{DOMAIN}/links/go", data=data, headers=headers)
    try:
        return r.json()['url']
    except Exception:
        return "Something went wrong :("

def zippy_share(url: str) -> str:
base_url = re_search('http.+.zippyshare.com', url).group()
response = rget(url)
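For context, here is a minimal usage sketch showing how the new handlers are reached through the dispatcher above. It is a sketch only: the import path mirrors the changed file, the sample URL is a hypothetical placeholder, and DirectDownloadLinkException is assumed to be importable from the same module, as the existing code suggests.

from bot.helper.mirror_utils.download_utils.direct_link_generator import (
    DirectDownloadLinkException, direct_link_generator)

try:
    # Hypothetical rocklinks.net short link; is_rock_link() routes it to rock()
    direct_url = direct_link_generator("https://rocklinks.net/abc123")
except DirectDownloadLinkException as err:
    print(f"Bypass failed: {err}")
else:
    print(f"Direct link: {direct_url}")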
