# Require exactly one input source: an import path argument or a list of links piped via stdin
if (import_str and import_path) or (not import_str and not import_path):
    stderr(
        '[X] You should pass either an import path as an argument, '
        'or pass a list of links via stdin, but not both.\n',
        color='red',
    )
    raise SystemExit(2)
elif import_str:
    import_path = save_stdin_to_sources(import_str, out_dir=out_dir)
else:
    import_path = save_file_to_sources(import_path, out_dir=out_dir)
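# The incoming links are now saved as a source file; verify external archiving dependencies before indexing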
check_dependencies()
# Step 1: Load list of links from the existing index
# merge in and dedupe new links from import_path
all_links: List[Link] = []
new_links: List[Link] = []
all_links = load_main_index(out_dir=out_dir)
if import_path:
    all_links, new_links = import_new_links(all_links, import_path, out_dir=out_dir)
# Step 2: Write updated index with deduped old and new links back to disk
write_main_index(links=all_links, out_dir=out_dir)
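# With index_only set, stop after refreshing the index and skip the archiving step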
if index_only:
    return all_links
# Step 3: Run the archive methods for each link
links = all_links if update_all else new_links
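# Re-archive every link when update_all is set, otherwise only the newly imported ones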
log_archiving_started(len(links))
idx: int = 0
link: Link = None # type: ignore
timer = TimedProgress(360, prefix=' ')
try:
    links = list(list_links(
        filter_patterns=filter_patterns,
        filter_type=filter_type,
        after=after,
        before=before,
    ))
finally:
    timer.end()
if not len(links):
    log_removal_finished(0, 0)
    raise SystemExit(1)
log_list_finished(links)
log_removal_started(links, yes=yes, delete=delete)
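# Walk the full main index and keep only the links that don't match the removal criteria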
timer = TimedProgress(360, prefix=' ')
try:
    to_keep = []
    all_links = load_main_index(out_dir=out_dir)
    for link in all_links:
        should_remove = (
            (after is not None and float(link.timestamp) < after)
            or (before is not None and float(link.timestamp) > before)
            or link_matches_filter(link, filter_patterns, filter_type)
        )
        if not should_remove:
            to_keep.append(link)
        elif should_remove and delete:
            shutil.rmtree(link.link_dir, ignore_errors=True)
finally:
    timer.end()
write_main_index(links=to_keep, out_dir=out_dir, finished=True)
log_removal_finished(len(all_links), len(to_keep))
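# Illustrative call (hypothetical name/signature, assuming this body implements a remove(...) command):
#   remove(filter_patterns=['https://example.com'], filter_type='exact', delete=True, yes=True)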
try:
    for idx, link in enumerate(links_after_timestamp(links, resume)):
        archive_link(link, overwrite=overwrite, out_dir=link.link_dir)
except KeyboardInterrupt:
    log_archiving_paused(len(links), idx, link.timestamp if link else '0')
    raise SystemExit(0)
except:
    print()
    raise
log_archiving_finished(len(links))
# Step 4: Re-write links index with updated titles, icons, and resources
all_links = load_main_index(out_dir=out_dir)
write_main_index(links=list(all_links), out_dir=out_dir, finished=True)
return all_links
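# Django class-based view method that renders the main index template (login required unless PUBLIC_INDEX is enabled):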
def get(self, request):
    if not request.user.is_authenticated and not PUBLIC_INDEX:
        return redirect(f'/admin/login/?next={request.path}')

    all_links = load_main_index(out_dir=OUTPUT_DIR)
    meta_info = load_main_index_meta(out_dir=OUTPUT_DIR)

    context = {
        'updated': meta_info['updated'],
        'num_links': meta_info['num_links'],
        'links': all_links,
        'VERSION': VERSION,
        'FOOTER_INFO': FOOTER_INFO,
    }

    return render(template_name=self.template, request=request, context=context)
def list_links(filter_patterns: Optional[List[str]]=None,
               filter_type: str='exact',
               after: Optional[float]=None,
               before: Optional[float]=None,
               out_dir: str=OUTPUT_DIR) -> Iterable[Link]:

    check_data_folder(out_dir=out_dir)

    all_links = load_main_index(out_dir=out_dir)

    for link in all_links:
        if after is not None and float(link.timestamp) < after:
            continue
        if before is not None and float(link.timestamp) > before:
            continue
        if filter_patterns:
            if link_matches_filter(link, filter_patterns, filter_type):
                yield link
        else:
            yield link
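# Illustrative usage (hypothetical values; assumes Link objects expose .timestamp and .url as above):
#   for link in list_links(filter_patterns=['https://example.com'], filter_type='exact'):
#       print(link.timestamp, link.url)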
assert os.path.exists(DATABASE_FILE)
# from django.contrib.auth.models import User
# if IS_TTY and not User.objects.filter(is_superuser=True).exists():
# print('{green}[+] Creating admin user account...{reset}'.format(**ANSI))
# call_command("createsuperuser", interactive=True)
print()
print('{green}[*] Collecting links from any existing indexes and archive folders...{reset}'.format(**ANSI))
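# Index links by URL so duplicates found across the existing indexes and archive folders are merged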
all_links: Dict[str, Link] = {}
if existing_index:
    all_links = {
        link.url: link
        for link in load_main_index(out_dir=out_dir, warn=False)
    }
    print(' √ Loaded {} links from existing main index.'.format(len(all_links)))
# Links in data folders that don't match their timestamp
fixed, cant_fix = fix_invalid_folder_locations(out_dir=out_dir)
if fixed:
    print(' {lightyellow}√ Fixed {} data directory locations that didn\'t match their link timestamps.{reset}'.format(len(fixed), **ANSI))
if cant_fix:
    print(' {lightyellow}! Could not fix {} data directory locations due to conflicts with existing folders.{reset}'.format(len(cant_fix), **ANSI))
# Links in JSON index but not in main index
orphaned_json_links = {
    link.url: link
    for link in parse_json_main_index(out_dir)
    if link.url not in all_links
}
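# Orphaned links are keyed by URL as well, so they can be merged into all_links without creating duplicates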
filter_patterns: Optional[List[str]]=None,
filter_type: Optional[str]=None,
status: Optional[str]=None,
after: Optional[str]=None,
before: Optional[str]=None,
out_dir: str=OUTPUT_DIR) -> List[Link]:
"""Import any new links from subscriptions and retry any previously failed/skipped links"""
check_dependencies()
check_data_folder(out_dir=out_dir)
# Step 1: Load list of links from the existing index
# merge in and dedupe new links from import_path
all_links: List[Link] = []
new_links: List[Link] = []
all_links = load_main_index(out_dir=out_dir)
# Step 2: Write updated index with deduped old and new links back to disk
write_main_index(links=list(all_links), out_dir=out_dir)
# Step 3: Filter for selected_links
matching_links = list_links(
    filter_patterns=filter_patterns,
    filter_type=filter_type,
    before=before,
    after=after,
)
matching_folders = list_folders(
    links=list(matching_links),
    status=status,
    out_dir=out_dir,
)
def info(out_dir: str=OUTPUT_DIR) -> None:
    """Print out some info and statistics about the archive collection"""

    check_data_folder(out_dir=out_dir)

    print('{green}[*] Scanning archive collection main index...{reset}'.format(**ANSI))
    print(f' {out_dir}/*')
    num_bytes, num_dirs, num_files = get_dir_size(out_dir, recursive=False, pattern='index.')
    size = printable_filesize(num_bytes)
    print(f' Size: {size} across {num_files} files')
    print()

    links = list(load_main_index(out_dir=out_dir))
    num_json_links = len(links)
    num_sql_links = sum(1 for link in parse_sql_main_index(out_dir=out_dir))
    num_html_links = sum(1 for url in parse_html_main_index(out_dir=out_dir))
    num_link_details = sum(1 for link in parse_json_links_details(out_dir=out_dir))
    users = get_admins().values_list('username', flat=True)
    print(f' > JSON Main Index: {num_json_links} links'.ljust(36), f'(found in {JSON_INDEX_FILENAME})')
    print(f' > SQL Main Index: {num_sql_links} links'.ljust(36), f'(found in {SQL_INDEX_FILENAME})')
    print(f' > HTML Main Index: {num_html_links} links'.ljust(36), f'(found in {HTML_INDEX_FILENAME})')
    print(f' > JSON Link Details: {num_link_details} links'.ljust(36), f'(found in {ARCHIVE_DIR_NAME}/*/index.json)')
    print(f' > Admin: {len(users)} users {", ".join(users)}'.ljust(36), f'(found in {SQL_INDEX_FILENAME})')
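    # Warn if the SQL or HTML indexes have drifted out of sync with the JSON main index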
    if num_html_links != len(links) or num_sql_links != len(links):
        print()
        print(' {lightred}Hint:{reset} You can fix index count differences automatically by running:'.format(**ANSI))
        print(' archivebox init')