import gzip
import os
import re
import subprocess
from multiprocessing import Pool

# Compile the URL pattern once at module level instead of recompiling it for
# every line of every file. The stray double backslashes in the character
# class have also been removed so it matches '!', '*', '(', ')' and ',' only.
URL_PATTERN = re.compile(
    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
)


def extract_urls_from_file(file_path):
    urls = []
    try:
        with gzip.open(file_path, 'rt', encoding='latin-1') as file:
            # Process the file line by line to keep memory usage low
            for line in file:
                # Extract URLs using the precompiled regular expression
                urls.extend(URL_PATTERN.findall(line))
    except (gzip.BadGzipFile, EOFError) as e:
        print(f"Skipping file '{file_path}' due to an error while reading the compressed file: {e}")
    except Exception as e:
        print(f"An unexpected error occurred while processing '{file_path}': {e}")
    return urls


def process_file(file_path):
    # Extract URLs from the gzipped file
    urls = extract_urls_from_file(file_path)

    # Create the output file path with a '_urls.txt' suffix
    output_file_path = os.path.splitext(file_path)[0] + '_urls.txt'

    # Write the URLs to the output file, one per line
    with open(output_file_path, 'w') as output_file:
        output_file.write('\n'.join(urls))

    print(f"URLs extracted from '{file_path}' and saved to '{output_file_path}'")

    # Remove the original gzipped file
    os.remove(file_path)


def extract_urls_from_directory(directory_path):
    # Get the list of '.wat.gz' files in the directory, sorted for determinism
    file_list = sorted(
        filename for filename in os.listdir(directory_path)
        if filename.endswith('.wat.gz')
    )

    # Create a multiprocessing Pool sized to the available CPU cores
    # (the original hardcoded 12 processes, contradicting its own comment)
    with Pool(processes=os.cpu_count()) as pool:
        # Map the file-processing function over the list of files;
        # map() blocks until every file has been processed
        pool.map(
            process_file,
            [os.path.join(directory_path, filename) for filename in file_list],
        )


def download_url(url):
    # Download the URL using axel with 3 connections. Passing the command as
    # an argument list (instead of shell=True with an f-string) avoids
    # shell-injection issues with untrusted URLs.
    subprocess.run(['axel', '-n', '3', url])


def main():
    # Read the URLs from the input file
    with open('urls_to_download.txt', 'r') as file:
        urls = file.readlines()

    # Remove any leading/trailing whitespace from the URLs
    urls = [url.strip() for url in urls]

    # Define the batch size
    batch_size = 100

    # Define the concurrency level (number of downloads running concurrently)
    concurrency_level = 10

    # Split the URLs into batches
    batches = [urls[i:i + batch_size] for i in range(0, len(urls), batch_size)]

    # Iterate over the batches and download each batch concurrently
    for batch in batches:
        # Create a multiprocessing Pool with the specified concurrency level;
        # map() waits for the whole batch to finish before the next one starts
        with Pool(processes=concurrency_level) as pool:
            pool.map(download_url, batch)

    # Extract URLs from the downloaded files
    extract_urls_from_directory(os.getcwd())

    # Run the urlextractor_archiveteam.sh script
    subprocess.run(['bash', 'urlextractor_archiveteam.sh'])


if __name__ == '__main__':
    # The guard is required: without it, the multiprocessing workers would
    # re-execute the download/extract pipeline when they import this module.
    main()
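
# Example input (a sketch; the file names and URLs below are hypothetical).
# The script expects 'urls_to_download.txt' in the working directory with one
# download URL per line, each pointing at a '.wat.gz' archive, e.g.:
#
#   https://example.org/crawl/segment-00000.wat.gz
#   https://example.org/crawl/segment-00001.wat.gz
#
# It downloads each batch with axel, writes the URLs found in each archive to
# a matching '<name>.wat_urls.txt' file (deleting the archive afterwards), and
# finally hands off to 'urlextractor_archiveteam.sh'. Both 'axel' and 'bash'
# must be available on PATH for the script to run.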