vbvvb

 

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin


def extract_links(url):
    """Fetch *url* and save every discovered link to ``links.txt``.

    Collects the absolute URLs of all ``<a href>`` hyperlinks,
    ``<video><source src>`` media sources, and ``<img src>`` images found
    in the page, resolving relative URLs against *url*.

    Parameters:
        url: Address of the web page to scan.

    Side effects:
        Writes one URL per line (sorted) to ``links.txt`` in the current
        directory and prints a summary; on a fetch error it prints a
        message and returns without writing the file.
    """
    try:
        # Timeout prevents the script from hanging forever on a dead or
        # unresponsive server; raise_for_status turns HTTP 4xx/5xx into errors.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException as e:
        # Only network/HTTP problems are "Fehler beim Abrufen der Webseite";
        # file-write errors below should surface with their own traceback.
        print(f'Fehler beim Abrufen der Webseite: {e}')
        return

    soup = BeautifulSoup(response.text, 'html.parser')
    links = set()

    # <a href="..."> hyperlinks.
    for a in soup.find_all('a', href=True):
        links.add(urljoin(url, a['href']))

    # <video><source src="..."> media sources.
    for video in soup.find_all('video'):
        for source in video.find_all('source', src=True):
            links.add(urljoin(url, source['src']))

    # <img src="..."> images.
    for img in soup.find_all('img', src=True):
        links.add(urljoin(url, img['src']))

    # Sorted output makes links.txt deterministic between runs; explicit
    # UTF-8 encoding keeps non-ASCII URLs intact on any platform.
    with open('links.txt', 'w', encoding='utf-8') as file:
        file.writelines(link + '\n' for link in sorted(links))

    print(f'{len(links)} Links wurden gefunden und in "links.txt" gespeichert.')


if __name__ == '__main__':
    website_url = input('Gib die URL der Webseite ein: ')
    extract_links(website_url)

Kommentare

Beliebte Posts aus diesem Blog

cc

ttt