Upload files to "/"
commit 5dd8e4f9ce
94 multithread_script_5.0.py Normal file
@@ -0,0 +1,94 @@
import subprocess
import os
import gzip
import re
from multiprocessing import Pool

# Regular expression for matching http/https URLs, compiled once instead of
# once per input line.
URL_PATTERN = re.compile(r'https?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')


def extract_urls_from_file(file_path):
    urls = []
    try:
        with gzip.open(file_path, 'rt', encoding='latin-1') as file:
            # Process the file line by line and collect every URL found
            for line in file:
                urls.extend(URL_PATTERN.findall(line))
    except (gzip.BadGzipFile, EOFError) as e:
        print(f"Skipping file '{file_path}' due to an error while reading the compressed file: {e}")
    except Exception as e:
        print(f"An unexpected error occurred while processing '{file_path}': {e}")

    return urls


def process_file(file_path):
    # Extract URLs from the gzipped file
    urls = extract_urls_from_file(file_path)

    # Create the output file path with a '_urls.txt' suffix
    output_file_path = os.path.splitext(file_path)[0] + '_urls.txt'

    # Write the URLs to the output file, one per line
    with open(output_file_path, 'w') as output_file:
        output_file.write('\n'.join(urls))

    print(f"URLs extracted from '{file_path}' and saved to '{output_file_path}'")

    # Remove the original gzipped file
    os.remove(file_path)


def extract_urls_from_directory(directory_path):
    # Get the list of files in the directory and sort them
    file_list = sorted(os.listdir(directory_path))

    # Create a multiprocessing Pool with a fixed number of worker processes
    pool = Pool(processes=12)

    # Map the file processing function over every .wat.gz file in the directory
    pool.map(process_file, [os.path.join(directory_path, filename)
                            for filename in file_list if filename.endswith('.wat.gz')])

    # Close the pool and wait for the workers to finish
    pool.close()
    pool.join()


if __name__ == '__main__':
    # Read the URLs to download from the input file
    with open('urls_to_download.txt', 'r') as file:
        urls = file.readlines()

    # Remove any leading/trailing whitespace from the URLs
    urls = [url.strip() for url in urls]

    # Number of URLs handled per batch
    batch_size = 100

    # Concurrency level (number of download processes running at the same time)
    concurrency_level = 3

    # Split the URLs into batches
    batches = [urls[i:i + batch_size] for i in range(0, len(urls), batch_size)]

    # Iterate over the batches and download the URLs
    for batch in batches:
        # Create a multiprocessing Pool with the specified concurrency level
        pool = Pool(processes=concurrency_level)

        for url in batch:
            # Download the URL with axel using 3 connections; passing the
            # command as an argument list avoids shell-quoting problems in URLs
            pool.apply_async(subprocess.run, args=(['axel', '-n', '3', url],))

        # Close the pool to indicate that no more tasks will be added
        pool.close()

        # Wait for all downloads in the pool to finish
        pool.join()

    # Extract URLs from the downloaded files
    extract_urls_from_directory(os.getcwd())

    # Run the urlextractor_archiveteam.sh script
    subprocess.run(['bash', 'urlextractor_archiveteam.sh'])
28 urlextractor_archiveteam.sh Normal file
@@ -0,0 +1,28 @@
#!/bin/bash

# Directory that contains this script (and the *_urls.txt files to process)
directory=$(dirname "$0")

gzip_file() {
    file="$1"
    gzip "$file"
}

export -f gzip_file

# Filter URLs of interest from each extracted URL list into per-service export files
for file in "$directory"/*_urls.txt; do
    filename=$(basename "$file")
    grep -E "http(s)?://(www\.)?mediafire.com" "$file" | sort -u >> "/opt/commoncrawl/export/mediafire_urls.txt"
    grep "https://t.me/" "$file" | sort -u >> "/opt/commoncrawl/export/t.me_urls.txt"
    grep "https://telegram.me/" "$file" | sort -u >> "/opt/commoncrawl/export/telegram.me_urls.txt"
    grep -E "http(s)?://(www\.)?i.imgur.com" "$file" | sort -u >> "/opt/commoncrawl/export/imgur_urls.txt"
    grep "sitemap.xml" "$file" | sort -u >> "/opt/commoncrawl/export/sitemap_urls.txt"
    grep "https://cdn.discordapp.com/" "$file" | sort -u >> "/opt/commoncrawl/export/discord_urls.txt"

    # Compress the processed list in the background, unless it is one of the aggregated export files
    if [[ $filename != "mediafire_urls.txt" && $filename != "t.me_urls.txt" && $filename != "telegram.me_urls.txt" && $filename != "sitemap_urls.txt" ]]; then
        parallel gzip_file ::: "$file" &
    fi
done

# Wait for all background gzip processes to finish
wait