import subprocess
import os
import gzip
import re
import traceback
from multiprocessing import Pool
def extract_urls_from_file(file_path):
    """Extract every HTTP/HTTPS URL found in a gzip-compressed text file.

    Parameters
    ----------
    file_path : str
        Path to a gzip-compressed file (e.g. a ``.warc.gz`` archive).

    Returns
    -------
    list[str]
        All URL matches in file order, duplicates kept. On read or
        decompression errors the error is printed and whatever was
        collected so far (possibly an empty list) is returned.
    """
    # BUG FIX: the pattern was previously re-compiled for EVERY line of the
    # file; compile it once before the loop.
    url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    urls = []
    try:
        # latin-1 maps every byte to a character, so decoding never fails
        # even on binary noise inside the archive.
        with gzip.open(file_path, 'rt', encoding='latin-1') as file:
            for line in file:
                urls.extend(url_pattern.findall(line))
    except (gzip.BadGzipFile, EOFError) as e:
        print(f"Error while reading the compressed file '{file_path}': {e}")
    except Exception as e:
        # Best-effort: report unexpected failures instead of aborting the
        # whole multiprocessing run.
        print(f"An unexpected error occurred while processing '{file_path}': {e}")
        print("Full traceback:")
        traceback.print_exc()
    return urls
def process_file(file_path):
    """Extract URLs from one gzipped archive and archive the result.

    Pipeline for a single input file:
      1. pull every URL out of the archive (``extract_urls_from_file``),
      2. write them to ``<file>_urls.txt``,
      3. recompress that list with the external ``zstd`` tool,
      4. delete the original archive (and the intermediate ``.txt`` if
         compression succeeded),
      5. drop this file's line from ``urls_to_download.txt`` via ``sed``
         and report how many URLs are still pending.

    Parameters
    ----------
    file_path : str
        Path to the gzip-compressed input file.
    """
    print(f"Processing file: {file_path}")

    # Extract URLs from the gzipped file
    urls = extract_urls_from_file(file_path)

    # Create the output file path with '_urls.txt' extension
    output_file_path = os.path.splitext(file_path)[0] + '_urls.txt'
    print(f"Output file path: {output_file_path}")

    # Write the URLs to the output file
    with open(output_file_path, 'w') as output_file:
        output_file.write('\n'.join(urls))
    print(f"URLs written to {output_file_path}")

    # Use the zstd command-line tool for compression. Argument-list form
    # (shell=False) keeps file paths from being interpreted by a shell.
    compressed_file_path = f'{output_file_path}.zst'
    compress_result = subprocess.run(
        ['zstd', '-T0', '-12', '--long', output_file_path, '-o', compressed_file_path])
    print(f"Compressed file saved as '{compressed_file_path}'")

    # Remove the original gzipped file
    os.remove(file_path)
    print(f"Original file removed: {file_path}")

    # Remove the intermediate _urls.txt only when compression succeeded;
    # deleting it unconditionally would lose the URL list on a zstd failure.
    if compress_result.returncode == 0:
        os.remove(output_file_path)
        print(f"Original file removed: {output_file_path}")

    # Remove the line containing the filename (without "_urls.txt") from
    # urls_to_download.txt. BUG FIX: the sed pattern and the log messages
    # previously did not interpolate the computed filename at all.
    filename = os.path.basename(output_file_path).replace('_urls.txt', '')
    # NOTE(review): filename is used verbatim as a sed BRE address; names
    # containing regex metacharacters would need escaping — confirm inputs.
    command = ['sed', '-i', f'/{filename}/d', 'urls_to_download.txt']
    if subprocess.run(command).returncode == 0:
        print(f"File {filename} has been successfully removed from urls_to_download.txt")
        with open('urls_to_download.txt', 'r') as file:
            remaining_count = sum(1 for line in file)
        print(f"URLs remaining to be processed: {remaining_count}")
    else:
        print(f"Failed to remove {filename} from urls_to_download.txt")
def extract_urls_from_directory(directory_path):
    """Process every ``.warc.gz`` in *directory_path*, then download the
    remaining URLs listed in ``urls_to_download.txt``.

    Phase 1: run ``process_file`` over all ``*.warc.gz`` files with a
    7-process worker pool.
    Phase 2: read ``urls_to_download.txt`` and fetch the URLs with the
    external ``axel`` downloader, 48 URLs per batch and 4 concurrent
    downloads within each batch.
    """
    warc_files = [
        os.path.join(directory_path, filename)
        for filename in sorted(os.listdir(directory_path))
        if filename.endswith('.warc.gz')
    ]
    # map() blocks until all workers finish; the context manager tears the
    # pool down even if a worker raises.
    with Pool(processes=7) as pool:
        pool.map(process_file, warc_files)

    with open('urls_to_download.txt', 'r') as file:
        # Skip blank lines so axel is never invoked with an empty URL.
        urls = [url.strip() for url in file if url.strip()]

    batch_size = 48
    concurrency_level = 4
    batches = [urls[i:i + batch_size] for i in range(0, len(urls), batch_size)]

    for batch in batches:
        # Fresh pool per batch; close()+join() forces each batch to finish
        # before the next one starts.
        pool = Pool(processes=concurrency_level)
        for url in batch:
            # SECURITY FIX: URLs come from a file (untrusted input). Pass
            # an argument list with shell=False so URL content can never be
            # interpreted by the shell (was: f'axel -n 4 {url}' + shell=True).
            pool.apply_async(subprocess.run, args=(['axel', '-n', '4', url],))
        pool.close()
        pool.join()
if __name__ == '__main__':
    # BUG FIX: the multiprocessing entry point must be guarded. On
    # spawn-start platforms (Windows/macOS) an unguarded call makes every
    # worker process re-import the module and re-launch the whole pipeline.
    extract_urls_from_directory(os.getcwd())