#!/usr/bin/env bash
#
# shellcheck disable=SC2034,SC1090,SC2254
shopt -s extglob
trap "trap_error" TERM
trap "trap_clean" EXIT
export TOP_PID=$$
version="0.7.1"
release="20200812"
functions="$(dirname "$0")/books_functions"
if [ -f "$functions" ]; then
source "$functions"
else
echo "$functions not found"
exit 1
fi
main () {
# PREFERENCES
config=${XDG_CONFIG_HOME:-$HOME/.config}/books.conf
# target directory for downloaded publications
target_directory="${HOME}/Books"
# when defined, subdirectory of $target_directory for torrents
torrent_directory="torrents"
# when defined, location where files downloaded with torrent client end up
torrent_download_directory="/net/media/incoming"
# when true, launch cron jobs to copy files from torrent download directory
# to target directory using the correct name
torrent_cron_job=1
# default limit on queries
limit=1000
# maximum database age (in minutes) before attempting update
max_age=120
# topics are searched/displayed in this language ("en" or "ru")
language="en"
# database host
dbhost="localhost"
# database port
dbport="3306"
# database user
dbuser="libgen"
# default fields for fulltext search
default_fields="author,title"
# window/dialog heading for dialog and yad/zenity
list_heading="Select publication(s) for download:"
# add md5 to filename? Possibly superfluous as it can be derived from the file contents but a good guard against file corruption
filename_add_md5=0
# tool preferences, list preferred tool first
gui_tools="zenity|yad"
tui_tools="dialog|whiptail"
dl_tools="curl|wget"
parser_tools="xidel|hxwls"
pager_tools="less|more|cat"
# torrent helper tools need to support the following commands:
# add-selective <torrent_file> <md5> # downloads file <md5> from torrent <torrent_file>
# torrent-hash <torrent_file> # gets btih for <torrent_file>
# torrent-files <torrent_file> # lists files in <torrent_file>
# remove <btih> # remove active torrent with info-hash <btih>
# ls <btih> # show download status for active torrent with info-hash <btih>
# info <btih> # show extensive info (files, peers, etc) for torrent with info-hash <btih>
# active <btih> # return `true` if the torrent is active, `false` otherwise
torrent_tools="tm"
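# For illustration only: with the default helper 'tm' and a hypothetical torrent
# file r_1234000.torrent the commands above would be invoked as e.g.:
#   tm torrent-files r_1234000.torrent
#   tm add-selective r_1234000.torrent 51b4ee7bc7eeb6ed7f164830d5d904ae
#   tm active <btih> && echo "still downloading"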
# don't use a pager when not running in a tty
[ ! -t 1 ] && pager_tools="cat"
# database names to use:
# books, books-all, nbook and xbook use the main libgen database
# fiction, nfiction and xfiction use the 'fiction' database
declare -A programs=(
[books]=libgen
[books-all]=libgen
[nbook]=libgen
[xbook]=libgen
[fiction]=libgen_fiction
[nfiction]=libgen_fiction
[xfiction]=libgen_fiction
[libgen_preview]=libgen # the actual database to use for preview is passed as a command line option
)
declare -A tables=(
[libgen]="(updated topics description hashes)"
[libgen_fiction]="(fiction fiction_description fiction_hashes)"
)
# searchable database fields
declare -A schema=(
[a]=author
[t]=title
[d]=edition
[e]=extension
[l]=language
[y]=year
[s]=series
[p]=publisher
[c]=city
[o]=topic_descr
[v]=volumeinfo
[r]=periodical
[g]=tags
[z]=locator
[i]=issn
[n]=asin
[q]=openlibraryid
[b]=identifierwodash
[m]=md5
[D]=ddc
[L]=lcc
)
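# Each key above is a getopts option that searches the corresponding column, e.g.
# 'books -a tolkien -l english' (default partial match) adds the clauses
#   and author like '%tolkien%' and language like '%english%'
# see add_clause below for how -X/-XX/-XXX change the match type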
# base url for covers
declare -A coverurl=(
[libgen]='http://31.42.184.140/covers'
[libgen_fiction]='http://31.42.184.140/fictioncovers'
)
# download archives
declare -A downloadurl=(
[libgen]="http://31.42.184.140/main"
[libgen_fiction]="http://31.42.184.140/fiction"
)
# torrent archives
declare -A torrenturl=(
[libgen]='http://libgen.rs/repository_torrent'
[libgen_fiction]='http://libgen.rs/fiction/repository_torrent'
)
# torrent file name prefixes
declare -A torrentprefix=(
[libgen]='r'
[libgen_fiction]='f'
)
# directory name prefixes (used with deep path (-$))
declare -A dirprefix=(
[libgen]="nonfiction"
[libgen_fiction]="fiction"
)
# ipfs gateway
ipfs_gw="https://cloudflare-ipfs.com"
# source config file if it exists
[[ -f ${config} ]] && source "${config}"
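# Example ${XDG_CONFIG_HOME:-$HOME/.config}/books.conf overriding some of the
# defaults above (values shown are purely illustrative):
#   target_directory="$HOME/Library"
#   limit=200
#   use_ipfs=1
#   torrent_download_directory="$HOME/Downloads/torrents"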
# (mostly) END OF PREFERENCES
# user agent string generator
declare -a adjective=(white red blue pink green brown dark light big small tiny earth glass air space forest lake sea ground fire crunchy spicy boring zonked blasted stoned fried flattened stretched smelly ugly obnoxious irritating whiny lazy)
declare -a critter=(goat frog hound fish lizard gator moose monkey whale hippo fox bird weasel owl cow pig hog donkey duck chicken dino sloth snake iguana gecko)
user_agent="Mozilla/5.0 ($(uname -s)) ${adjective[$((RANDOM%${#adjective[*]}))]^}${critter[$((RANDOM%${#critter[*]}))]^}/${RANDOM:0:3}.${RANDOM:0:1}.${RANDOM:0:4}.${RANDOM:0:2}"
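# illustrative only (all parts are picked at random); the result looks something
# like: Mozilla/5.0 (Linux) SpicyGecko/123.4.5678.90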
# display columns for yad, zenity and pager
declare -A zenity_columns=(
[libgen]="--column Download --column MD5 --column Title --column Author --column Year --column Edition --column Publisher --column Language --column Topic --column Size --column Format --hide-column=2"
[libgen_fiction]="--column Download --column MD5 --column Title --column Author --column Year --column Edition --column Publisher --column Language --column Commentary --column Size --column Format --hide-column=2"
)
declare -A yad_columns=(
[libgen]="--column Download --column MD5:HD --column Title --column Author --column Year:NUM --column Edition --column Publisher --column Language --column Topic --column Size:NUM --column Format --column Info:HD --search-column=3 --print-column=2 --tooltip-column=12"
[libgen_fiction]="--column Download --column MD5:HD --column Title --column Author --column Year:NUM --column Edition --column Publisher --column Language --column Commentary --column Size:NUM --column Format --column Info:HD --search-column=3 --print-column=2 --tooltip-column=12"
)
# defines which columns are shown by default in a command line (i.e. pager) search
declare -A pager_columns=(
[libgen]="Title,Author,Year,Edition,Publisher,Language,Topic_descr,Filesize,Extension,Series,MD5"
[libgen_fiction]="Title,Author,Year,Edition,Publisher,Language,Commentary,Filesize,Extension,Series,MD5"
)
# used to select table prefix in command line search, contains regex patterns
declare -A attribute_columns=(
[topics]="topic_descr|topic_id"
[hashes]="crc32|edonkey|aich|sha1|tth|torrent|btih|sha256"
[description]="^descr$|toc"
)
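# get_fields uses these patterns to prefix a requested column with the right table
# alias (t=topics, h=hashes, d=description, m=main table), e.g. '-F title,sha1'
# becomes: greatest(m.title, '-'),greatest(h.sha1, '-')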
# query output filter for different purposes
declare -A filters=(
[search]="cat"
[xsearch]="sed -e 's/&/&amp;/g'"
[filename]="sed -e 's/[^-[:alnum:]:;?!.,+@#%]/_/g;s/^\([-_]\)*//'"
[dirname]="sed -e 's/[^-[:alnum:]:;?!/.,+@#%]/_/g;s/^\([-_]\)*//'"
[id]="cat"
[ipfs_cid]="cat"
[extension]="cat"
[attributes]="cat"
[preview_dialog]="cat"
[preview_whiptail]="cat"
[preview_yad]="sed -e 's/&/&amp;/g'"
[preview_zenity]="sed -e 's/&/&amp;/g'"
)
# GETOPT config
search_options='@('$(echo "${!schema[@]}"|tr ' ' '|')')'
search_getopts="$(echo "${!schema[@]}"|tr ' ' ':'):"
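# with the schema above these expand to an extglob pattern and a getopts spec along
# the lines of '@(a|t|d|...|D|L)' and 'a:t:d:...:D:L:' (associative array key order
# is unspecified, so the actual order varies)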
# X11-related config
if xset q &>/dev/null; then
# used to size yad/zenity windows
min_screenres=$(xrandr|grep '\*'|sort -n|head -1|awk '{print $1}')
x_size=$(($(echo "$min_screenres"|cut -d 'x' -f 1) - 50))
y_size=$(($(echo "$min_screenres"|cut -d 'x' -f 2) - 30))
fi
# defines program behaviour to a large extent
program=$(basename "$0")
db="${programs[$program]}"
# arrays
declare -a query_result
# refs
declare -n sql_source
# misc
clauses=""
fields=""
opt_fields=""
show_fields=""
query=""
no_update=0
# this contains the columns in the current database, used to filter out unsupported search fields
current_fields="$(get_current_fields)"
# set ui_tool in order of preference (first found wins)
case "$program" in
xbook|xfiction|libgen_preview)
ui_tool=$(find_tool "$gui_tools")
;;
nbook|nfiction)
ui_tool=$(find_tool "$tui_tools")
;;
*)
ui_tool="pager"
;;
esac
check_settings
# PROCESS OPTIONS AND BUILD QUERY
while getopts ":${search_getopts}fF:hkxX#:@wu:I:U:=:j:J:M:$" OPTION
do
case $OPTION in
$search_options)
add_clause "${OPTION}" "${OPTARG}"
;;
h)
help
exit
;;
k)
create_symlinks
exit
;;
f)
fulltext=1
;;
F)
if [[ "$program" =~ ^books|^fiction ]]; then
show_fields="${OPTARG}"
# test for non-existent fields
get_fields > /dev/null
else
exit_with_error "-F FIELDS only works with _books_ and _fiction_"
fi
;;
x)
no_update=1
;;
X)
((exact_match++))
;;
'#')
limit="${OPTARG}"
;;
w)
preview=1
;;
u)
if is_true "$OPTARG"; then
if [[ -n "$torrent_tools" ]]; then
if find_tool $torrent_tools >/dev/null;then
use_torrent=1
unset use_ipfs
else
exit_with_error "-u: torrent helper script ($torrent_tools) not found"
fi
else
exit_with_error "-u needs torrent helper script, see \$torrent_tools"
fi
else
unset use_torrent
fi
;;
I)
if [[ "$OPTARG" == "show" ]]; then
get_ipfs_cid
exit
elif is_true "$OPTARG"; then
use_ipfs=1
unset use_torrent
else
unset use_ipfs
fi
;;
U)
get_torrentpath "${OPTARG}"
exit
;;
j)
get_filename "${OPTARG}"
exit
;;
J)
md5_download=1
md5="${OPTARG}"
;;
M)
md5_fast_path=1
md5="${OPTARG}"
;;
@)
source "$(which torsocks)" on
;;
=)
if [ -d "${OPTARG}" ]; then
target_directory="${OPTARG}"
else
exit_with_error "Directory ${OPTARG} does not exist"
fi
;;
$)
use_deep_path=1
;;
esac
done
# direct download - download single publication using MD5
if [[ -n "$md5_download" ]]; then
ui_tool="none"
db=$(get_db_for_md5 "$md5")
if [[ -n "$db" ]]; then
download "$db" "$md5"
else
exit_with_error "unknown md5, can not download"
fi
exit
fi
# direct info - get data on a single publication using MD5
if [[ -n "$md5_fast_path" ]]; then
case "$program" in
books|fiction)
pager_tools="cat"
query="and md5='${md5}'"
sql=$(prepare_sql "$db" "attributes")
run_query "$db" "attributes" "$sql"
list_${ui_tool} $preview
exit
;;
*)
exit_with_error "-M MD5 (fast path search) only works in _books_ and _fiction_"
esac
fi
# shift out options
shift $((OPTIND-1))
# process rest of command line
# this enables a simple 'books [like] ...' search pattern
operator=$1
if [[ $operator == like ]]; then
percent='%'
operator=' like '
shift
else
operator='='
fi
[[ $limit -gt 0 ]] && sql_limit="limit $limit"
[[ -z $opt_fields ]] && fields="$default_fields" || fields="${opt_fields%?}"
[[ $fulltext -gt 0 && ! $pattern =~ $opt_pattern ]] && pattern+="$opt_pattern"
if [[ $fulltext -gt 0 ]]; then
if [[ -z $* && -z $opt_pattern ]]; then
echo "can not perform a fulltext search without search words"
exit 1
else
unset clauses
query="and concat($fields) like '%${opt_pattern}${*}%'"
fi
else
if [[ -n $* ]]; then
query="and title${operator}'${percent}${*}${percent}'"
fi
fi
[[ -z ${query} && -z ${opt_pattern} ]] && exit_with_error "Empty query"
window_title="$query $clauses"
window_title=${window_title/and /}
# RUN QUERY AND PROCESS RESULTS
# update database before performing query
case "$program" in
books|books-all|nbook|xbook)
# this depends on the external 'update_libgen' script
# set no_update to 1 in case that script is not available, or when this script is run on a
# system without internet access
# update_database checks the database age and the no_update flag itself
update_database
;;
*)
# no database update for other databases (yet)
;;
esac
case "$program" in
# this searches both the normal 'updated' table as well as the 'updated_edited' table
books-all)
sql=$(prepare_sql "$db" "search_all" $ui_tool)
run_query "$db" "search" "$sql"
list_${ui_tool} $preview
;;
books|nbook|xbook|fiction|nfiction|xfiction)
if [[ ${program:0:1} == "x" ]]; then
filter="xsearch"
else
filter="search"
fi
sql=$(prepare_sql "$db" "search" $ui_tool)
run_query "$db" "$filter" "$sql"
list_${ui_tool} $preview
;;
# preview info on publication
libgen_preview)
db="$1"
shift
md5="$2" # preview gets fed the whole row of data by yad/zenity; md5 is the second item in this row
if [[ -n $md5 ]]; then
preview "$db" "$md5"
fi
;;
# default
*)
exit_with_error "unknown program: $program"
;;
esac
}
# DOWNLOAD
# feed this a list of hashes to attempt to download the related publications
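# e.g. 'download libgen 51b4ee7bc7eeb6ed7f164830d5d904ae' (md5 values come from the
# list views below or from -J on the command line)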
download () {
db="$1"
shift
for md5 in "$@"; do
filename=$(get_filename "$md5" "$use_deep_path")
if [[ -n "$filename" ]]; then
if is_true "$use_torrent"; then
dl_src_torrent "$db" "$md5" "$filename"
elif is_true "$use_ipfs"; then
dl_src_ipfs "$db" "$md5" "$filename"
else
dl_src_direct "$db" "$md5" "$filename"
fi
log_info "downloaded: $filename"
fi
done
}
# this attempts to download the actual publication, using one of several download tools
# and reporting through one of several progress monitors
get_file () {
filename="$1"
shift
url="$*"
# strip quotes
filename=${filename%\'}
filename=${filename#\'}
dl_tool=$(find_tool "$dl_tools")
stdbuf=$(find_tool "stdbuf")
tmpdir=$(mktemp -d /tmp/libgen_dl.XXXXXX)
touch "${tmpdir}/progress"
case $dl_tool in
curl)
curl --user-agent "$user_agent" -L -o "$target_directory/$filename" "${url}" 2>"${tmpdir}/progress" &
echo $! >"${tmpdir}/dl_pid"
;;
wget)
wget --no-use-server-timestamps --user-agent "$user_agent" -O "$target_directory/$filename" "${url}" -o "${tmpdir}/progress" --progress=bar:force &
echo $! >"${tmpdir}/dl_pid"
;;
*)
exit
;;
esac
# mawk does not support pattern repetition, hence the funny patterns
case $ui_tool in
dialog)
"$stdbuf" -oL tail -f "${tmpdir}/progress"|stdbuf -oL tr '\r' '\n'|awk -W posix_space -W interactive 'NF==12 { print "XXX\n" $(NF-11) "\nDownloading:\n'"$filename"'\n" $(NF-8) " of " $(NF-10) " at " $(NF) "B/s (" $(NF-1) " left)"; system(""); } /^.......................%\[....................\]/ { split($2,A,/\%/); print "XXX\n" A[1]"\nDownloading:\n'"$filename"'\n"$4 " at " $5 " (" $7 " left)"; system("");}' 2>/dev/null| dialog --backtitle "Download: $filename" --gauge "Starting download..." 10 120 2>/dev/null &
echo $! >"${tmpdir}/pager_pid"
;;
whiptail)
"$stdbuf" -oL tail -f "${tmpdir}/progress"|stdbuf -oL tr '\r' '\n'|awk -W posix_space -W interactive 'NF==12 { print "XXX\n" $(NF-11) "\nDownloading:\n'"$filename"'\n" $(NF-8) " of " $(NF-10) " at " $(NF) "B/s (" $(NF-1) " left)"; system(""); } /^.......................%\[....................\]/ { split($2,A,/\%/); print "XXX\n" A[1]"\nDownloading:\n'"$filename"'\n"$4 " at " $5 " (" $7 " left)"; system("");}' 2>/dev/null| whiptail --clear --backtitle "Download: $filename" --gauge "Starting download..." 10 0 0 2>/dev/null &
echo $! >"${tmpdir}/pager_pid"
;;
yad)
"$stdbuf" -oL tail -f "${tmpdir}/progress"|stdbuf -oL tr '\r' '\n'|awk -W posix_space -W interactive 'NF==12 { print $(NF-11) "\n#'"$filename"' (" $(NF) "B/s)"; system(""); } /^.......................%\[....................\]/ { split($2,A,/\%/); print A[1]"\n#'"$filename"' (" $5 ")"; system("");}' 2>/dev/null| yad --window-icon='gtk-save' --title='Downloading' --progress --progress-text="$filename" --auto-close 2>/dev/null &
echo $! >"${tmpdir}/pager_pid"
;;
zenity)
"$stdbuf" -oL tail -f "${tmpdir}/progress"|stdbuf -oL tr '\r' '\n'|awk -W posix_space -W interactive 'NF=12 { print $(NF-11) "\n#'"$filename"' (" $(NF) "B/s)"; system(""); } /^.......................%\[....................\]/ { split($2,A,/\%/); print A[1]"\n#'"$filename"' (" $5 ")"; system("");}' 2>/dev/null| zenity --window-icon='gtk-save' --title='Downloading' --progress --auto-close 2>/dev/null &
echo $! >"${tmpdir}/pager_pid"
;;
*)
"$stdbuf" -oL tail -f "${tmpdir}/progress" &
echo $! >"${tmpdir}/pager_pid"
;;
esac
trap 'kill $(<"${tmpdir}"/dl_pid) $(<"${tmpdir}"/pager_pid) 2>/dev/null; rm -rf "${tmpdir}";' EXIT
# wait for the pager to finish (or be closed by the user) and/or the download to finish
# this replaces the (buggy) auto-kill functionality of yad and zenity (dialog does not have any auto-kill)
while (kill -0 "$(<"${tmpdir}/pager_pid")" 2>/dev/null); do
if (kill -0 "$(<"${tmpdir}/dl_pid")" 2>/dev/null); then
sleep 1
else
break
fi
done
}
dl_src_direct () {
db="$1"
shift
md5="$1"
shift
filename="$*"
case "$db" in
libgen)
url="${downloadurl[$db]}/$(get_torrent "${db}" "${md5}")/${md5,,}/placeholder"
;;
libgen_fiction)
extension=$(get_attr 'extension' "${db}" "${md5}")
url="${downloadurl[$db]}/$(get_torrent "${db}" "${md5}")/${md5,,}.${extension,,}/placeholder"
;;
*)
exit_with_error "no direct download available for $db"
;;
esac
if ! url_available "$url"; then
url=""
parser=$(find_tool "$parser_tools")
if [[ $parser == "xidel" ]]; then
url=$(xidel --user-agent "$user_agent" -s "${downloadurl[$db]}/${md5}" -e '//td[@id="info"]/h2/a/@href');
elif [[ $parser == "hxwls" ]]; then
url=$(hxwls "${downloadurl[$db]}/${md5}"|head -2|tail -1)
fi
fi
[[ -n "$url" ]] && get_file "'${filename}'" "${url}"
}
dl_src_ipfs () {
db="$1"
shift
md5="$1"
shift
filename="$*"
ipfs_cid=$(get_attr 'ipfs_cid' "${db}" "${md5}")
if [[ -z $ipfs_cid ]]; then
echo "ipfs_cid not found, trying direct download..."
dl_src_direct "${db}" "${md5}" "${filename}"
else
url="${ipfs_gw}/ipfs/${ipfs_cid}"
get_file "'${filename}'" "${url}"
fi
}
dl_src_torrent () {
db="$1"
shift
md5="$1"
shift
dest_filename="$*"
torrent_abspath=$(get_torrent_filename "${db}" "${md5}" 1)
ttool=$(find_tool "$torrent_tools")
if dl_torrent "$db" "$md5"; then
torrent_filepath=$($ttool torrent-files "$torrent_abspath"|grep -i "$md5")
if [[ -f "$torrent_abspath" ]]; then
torrent_job=$(date +%Y%m%d-%H%M%S)-$(get_torrent "$db" "$md5")"-$md5.job"
torrent_log="$target_directory/${torrent_directory:+$torrent_directory/}${torrent_job}"
torrent_btih=$($ttool torrent-hash "$torrent_abspath")
cat <<- EOT > "$torrent_log"
#!/usr/bin/env bash
# command: "$ttool add-selective $torrent_abspath $md5"
tdir="$torrent_download_directory"
tpath="$torrent_filepath"
dest="$target_directory/$dest_filename"
btih="$torrent_btih"
ttool="$ttool"
cronjob_remove () {
if cronjob_active; then
(crontab -l) 2>/dev/null | grep -v "$torrent_log" | sort | uniq | crontab -
fi
}
torrent_restart () { $ttool add-selective "$torrent_abspath" "$md5"; }
direct_download () { exec $program -u n -J "$md5"; }
cronjob_active () { crontab -l|grep -q "$torrent_log"; }
EOT
cat <<- 'EOT' >> "$torrent_log"
torrent_remove () { $ttool remove "$btih"; }
torrent_status () { $ttool ls "$btih"; }
torrent_info () { $ttool info "$btih"; }
torrent_active () { $ttool active "$btih"; }
job_status () {
if [[ -f "$dest" ]]; then echo -e "job finished, file copied to destination:\n$dest";
elif [[ -f "$tdir/$tpath" ]]; then echo "torrent downloaded";
if cronjob_active; then echo "cronjob active, wait for file to be copied to destination";
else echo "run this job without options to copy file to destination";
fi
elif torrent_active; then echo "torrent active:";torrent_status;
else echo "job inactive, file not downloaded, -R or -D to retry";
fi
}
check_md5 () {
md5_real=$(md5sum "$1"|awk '{print $1}')
md5=$(basename "$tpath")
[[ "${md5_real,,}" == "${md5,,}" ]]
}
copy_file () {
if [[ -f "$tdir/$tpath" ]]; then
if check_md5 "$tdir/$tpath"; then
install -D "$tdir/$tpath" "$dest"
else
false
fi
else
false
fi
}
show_help () {
cat <<-EOHELP
Use: bash jobid.job [-S] [-s] [-i] [-I] [-r] [-R] [-D] [-h] [torrent_download_directory]
Copies file from libgen/libgen_fiction torrent to correct location and name
-S show job status
-s show torrent status (short)
-i show torrent info (long)
-I show target file name
-r remove torrent and cron jobs
-R restart torrent download (does not restart cron job)
-D direct download (removes torrent and cron jobs)
-h show this help message
EOHELP
}
if [[ "$1" == "-r" ]]; then torrent_remove; cronjob_remove; exit; fi
if [[ "$1" == "-R" ]]; then torrent_restart; exit; fi
if [[ "$1" == "-D" ]]; then torrent_remove; direct_download; exit; fi
if [[ "$1" == "-s" ]]; then torrent_status; exit; fi
if [[ "$1" == "-S" ]]; then job_status; exit; fi
if [[ "$1" == "-i" ]]; then torrent_info; exit; fi
if [[ "$1" == "-I" ]]; then echo "$dest"; exit; fi
if [[ "$1" == "-h" ]]; then show_help; exit; fi
if [[ "$1" =~ \-.* ]]; then echo "unknown option: $1"; show_help; exit; fi
if torrent_active; then echo "torrent has not finished downloading"; exit 10; fi
if [[ -z "$tdir" ]]; then if [[ -d "$1" ]]; then tdir="$1"; else show_help; exit; fi; fi
count=0
until [[ $count == 5 ]]; do
if copy_file; then break; fi
sleep 5
((count++))
done
if [[ -f "$dest" ]]; then
if ! check_md5 "$dest"; then
echo "download corrupted, md5 does not match"
fi
else
echo "download failed"
fi
cronjob_remove
exit
# torrent client output under this line
:<<'END_OF_LOG'
EOT
(${ttool} add-selective "$torrent_abspath" "$md5";echo END_OF_LOG) >> "$torrent_log" 2>&1 &
echo -e "torrent job started, job script:\n$torrent_log"
# launch cron job?
if [[ -n "$torrent_download_directory" && "$torrent_cron_job" -gt 0 ]]; then
add_cron_job "bash $torrent_log"
echo "torrent cron job started"
fi
fi
else
echo "try direct download (-u n)"
fi
}
dl_torrent () {
db="$1"
md5="$2"
torrent_filename="$(get_torrent_filename "${db}" "${md5}")"
torrent_abspath="$(get_torrent_filename "${db}" "${md5}" 1)"
if [[ ! -f "$torrent_abspath" ]]; then
url="${torrenturl[$db]}/${torrent_filename}"
if url_available "$url"; then
get_file "'${torrent_directory:+$torrent_directory/}${torrent_filename}'" "${url}"
else
echo "Torrent $torrent_filename not available"
false
fi
fi
}
# DATABASE
# currently only the main libgen db can be updated through the api
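# e.g. with the default max_age=120 the updater is only launched when
# MAX(TimeLastModified) in the database is more than two hours old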
update_database () {
db="${programs[books]}"
last_update=$(($(date +%s)-$(date -d "$(get_time_last_modified "$db")" +%s)))
if [[ $last_update -gt $((max_age*60)) ]]; then
if [[ $no_update -eq 0 ]]; then
update_libgen=$(find_tool "update_libgen")
"$update_libgen"
else
echo "The database was last updated $((last_update/60)) minutes ago, consider updating"
fi
fi
}
get_current_fields () {
db="${programs[$program]}"
declare -a db_tables="${tables[$db]}"
for table in "${db_tables[@]}"; do
dbx "$db" "describe $table;"|awk '{print tolower($1)}'
done
}
get_time_last_modified () {
dbx "$db" 'select MAX(TimeLastModified) FROM updated;'
}
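# add_clause turns a search option into an SQL 'and ...' clause; depending on how
# often -X was given, a pattern P becomes (for e.g. -t):
#   (no -X) title like '%P%'   -X   title like 'P%'
#   -XX     title like '%P'    -XXX title='P'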
add_clause () {
option="$1"
shift
pattern="$*"
db="${programs[$program]}"
if [[ ${pattern:0:1} == '-' ]]; then
# option as argument, rewind OPTIND
((OPTIND-=1))
unset pattern
fi
if [[ $current_fields =~ ${schema[${option}]} ]]; then
if [[ -n $pattern ]]; then
# escape ' " \ % _
pattern=${pattern//\\/\\\\}
pattern=${pattern//\'/\\\'}
pattern=${pattern//\"/\\\"}
pattern=${pattern//%/\\%}
pattern=${pattern//_/\\_}
[[ ! $opt_pattern =~ $pattern ]] && opt_pattern+=" $pattern"
if [ -z "$exact_match" ]; then
clauses+=" and ${schema[${option}]} like '%${pattern}%'"
elif [ "$exact_match" -eq 1 ]; then
clauses+=" and ${schema[${option}]} like '${pattern}%'"
elif [ "$exact_match" -eq 2 ]; then
clauses+=" and ${schema[${option}]} like '%${pattern}'"
else
clauses+=" and ${schema[${option}]}='${pattern}'"
fi
fi
[[ ! $opt_fields =~ ${schema[${option}]} ]] && opt_fields+="${schema[${option}]},"
else
echo "warning: option -$option ignored (database $db does not contain column ${schema[${option}]})"
fi
}
# the 'pager' gets special treatment as it likes its lines unbroken...
run_query () {
db="$1"
shift
filter="$1"
shift
sql="$*"
declare -a line
query_result=()
while IFS=$'\t' read -ra line; do
if [[ $ui_tool == "pager" ]]; then
IFS='' query_result+=("$(printf '%s\t' "${line[@]}")")
else
query_result+=("${line[@]}")
fi
done < <(dbx "$db" "$sql"|(eval "${filters[$filter]}"))
}
get_attr () {
role="$1"
db="$2"
md5="$3"
# special case for filename with md5
if [[ "$role" == 'filename' && $filename_add_md5 -gt 0 ]]; then
role='filename_md5'
fi
sql=$(prepare_sql "${db}" "${role}")
run_query "${db}" "${role}" "${sql}"
if [[ ${#query_result[@]} -gt 0 ]]; then
echo "${query_result[0]}"
fi
}
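# get_torrent maps a publication id to its 1000-wide torrent bucket, e.g. id
# 1234567 -> 1234000, which yields the download path .../1234000/<md5> and the
# torrent file r_1234000.torrent (f_1234000.torrent for fiction)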
get_torrent () {
db="$1"
md5="$2"
id=$(get_attr 'id' "${db}" "${md5}")
[[ -n "$id" ]] && echo "$((id-id%1000))"
}
# this creates leading directories when called with 2 parameters (value of $2 does not matter)
get_filename () {
md5="$1"
create="$2"
db="${programs[$program]}"
filename=$(get_attr "filename" "$db" "$md5")
# trim overly long filenames to 255 characters
[[ ${#filename} -gt 255 ]] && filename="${filename:0:126}...${filename: -126}"
if [[ -n $filename ]]; then
if [[ -n $use_deep_path ]]; then
dirname="${dirprefix[$db]}/$(get_attr 'dirname' "$db" "$md5")"
if [[ -n $dirname && -n $create ]]; then
mkdir -p "${target_directory}/${dirname}"
filename="${dirname}/${filename}"
fi
fi
echo "$filename"
fi
}
get_torrentpath () {
md5="$1"
db="${programs[$program]}"
torrent=$(get_torrent "$db" "$md5")
[[ -n "$torrent" ]] && echo "$torrent/$md5"
}
get_attributes () {
md5="$1"
db="${programs[$program]}"
role="attributes"
get_attr "$role" "$db" "$md5"
}
# PREVIEWS
preview () {
db=$1
shift
for md5 in "$@"; do
preview_${ui_tool} "$db" "$md5"
done
}
# don't mess with the 'ugly formatting', the embedded newlines are part of the preview dialog
preview_dialog () {
db="$1"
md5="$2"
if [[ -n $db && -n $md5 ]]; then
sql=$(prepare_sql "$db" "preview" "$ui_tool")
run_query "$db" "preview_dialog" "${sql}"
if [[ ${#query_result[@]} -gt 0 ]]; then
filename=$(get_attr 'filename' "$db" "$md5")
exec 3>&1
dialog_result=$(dialog --backtitle "${program} - preview" --colors --cr-wrap --no-collapse --extra-button --ok-label "Download" --extra-label "Skip" --no-cancel --yesno \
"\Z1Author\Zn: ${query_result[0]}
\Z1Title\Zn: ${query_result[1]}
\Z1Volume\Zn: ${query_result[2]} \Z1Series\Zn: ${query_result[3]} \Z1Edition\Zn: ${query_result[4]}
\Z1Year\Zn: ${query_result[5]} \Z1Publisher\Zn: ${query_result[6]}
\Z1Language\Zn: ${query_result[7]} \Z1Size\Zn: ${query_result[8]} \Z1Type\Zn: ${query_result[9]}
\Z1OLID\Zn: ${query_result[10]} \Z1ISBN\Zn: ${query_result[11]} \Z1MD5\Zn: ${md5^^}
\Z1Filename\Zn: ${filename}
${query_result[12]}" \
0 0 2>&1 1>&3)
dialog_exit=$?
exec 3>&-
if [[ $dialog_exit -eq 0 ]]; then
download "${db}" "${md5}"
fi
fi
fi
}
preview_whiptail () {
db=$1
md5=$2
if [[ -n $db && -n $md5 ]]; then
sql=$(prepare_sql "$db" "preview" "$ui_tool")
run_query "$db" "preview_whiptail" "${sql}"
if [[ ${#query_result[@]} -gt 0 ]]; then
filename=$(get_attr 'filename' "$db" "$md5")
exec 3>&1
whiptail_result=$(whiptail --backtitle "${program} - preview" --yes-button "Download" --no-button "Skip" --nocancel --yesno \
"Author: ${query_result[0]}
Title: ${query_result[1]}
Volume: ${query_result[2]} Series: ${query_result[3]} Edition: ${query_result[4]}
Year: ${query_result[5]} Publisher: ${query_result[6]}
Language: ${query_result[7]} Size: ${query_result[8]} Type: ${query_result[9]}
OLID: ${query_result[10]} ISBN: ${query_result[11]} MD5: ${md5^^}
Filename: ${filename}
${query_result[12]}" \
0 0 2>&1 1>&3)
whiptail_exit=$?
exec 3>&-
if [[ $whiptail_exit -eq 0 ]]; then
download "${db}" "${md5}"
fi
fi
fi
}
preview_zenity () {
db="$1"
md5="$2"
if [[ -n $db && -n $md5 ]]; then
sql=$(prepare_sql "$db" "preview" "$ui_tool")
run_query "$db" "preview_zenity" "${sql}"
if [[ ${#query_result[@]} -gt 0 ]]; then
filename=$(get_attr 'filename' "$db" "$md5")
info="<table><tr><td><b>Author</b>:</td><td colspan='5'>${query_result[0]}</td></tr><tr><td><b>Title</b>:</td><td colspan='5'>${query_result[1]}</td></tr><tr><td><b>Volume</b>:</td><td>${query_result[2]}</td><td><b>Series</b>:</td><td>${query_result[3]}</td><td><b>Edition</b>:</td><td>${query_result[4]}</td></tr><tr><td><b>Year</b>:</td><td>${query_result[5]}</td><td><b>Publisher</b>:</td><td>${query_result[6]}</td></tr><tr><td><b>Language</b>:</td><td>${query_result[7]}</td><td><b>Size</b>:</td><td>${query_result[8]}</td><td><b>Type</b>:</td><td>${query_result[9]}</td></tr><tr><td><b>OLID</b>:</td><td>${query_result[10]}</td><td><b>ISBN</b>:</td><td>${query_result[11]}</td><td><b>MD5</b>:</td><td>${md5^^}</td></tr></table><span style='font-size:x-small;'><pre>${filename}</pre></span><hr><table><tr><td style='width:25%;'><img style='width:95%;' src='${coverurl[$db]}/${query_result[13]}'></td><td style='vertical-align:top;'>$(strip_html "${query_result[12]}")</td></tr></table>"
if zenity_result=$(echo "$info"|zenity --width $x_size --height $y_size --text-info --html --ok-label "Download" --cancel-label "Skip" --filename=/dev/stdin 2>/dev/null); then
download "${db}" "${md5}"
fi
fi
fi
}
preview_yad () {
db="$1"
md5="$2"
for md5 in "$@"; do
sql=$(prepare_sql "$db" "preview" "$ui_tool")
run_query "$db" "preview_yad" "${sql}"
if [[ ${#query_result[@]} -gt 0 ]]; then
filename=$(get_attr 'filename' "$db" "$md5")
info="<table><tr><td><b>Author</b>:</td><td colspan='5'>${query_result[0]}</td></tr><tr><td><b>Title</b>:</td><td colspan='5'>${query_result[1]}</td></tr><tr><td><b>Volume</b>:</td><td>${query_result[2]}</td><td><b>Series</b>:</td><td>${query_result[3]}</td><td><b>Edition</b>:</td><td>${query_result[4]}</td></tr><tr><td><b>Year</b>:</td><td>${query_result[5]}</td><td><b>Publisher</b>:</td><td>${query_result[6]}</td></tr><tr><td><b>Language</b>:</td><td>${query_result[7]}</td><td><b>Size</b>:</td><td>${query_result[8]}</td><td><b>Type</b>:</td><td>${query_result[9]}</td></tr><tr><td><b>OLID</b>:</td><td>${query_result[10]}</td><td><b>ISBN</b>:</td><td>${query_result[11]}</td><td><b>MD5</b>:</td><td>${md5^^}</td></tr></table><span style='font-size:x-small;'><pre>${filename}</pre></span><hr><table><tr><td style='width:25%;'><img style='width:95%;' src='${coverurl[$db]}/${query_result[13]}'></td><td style='vertical-align:top;'>$(strip_html "${query_result[12]}")</td></tr></table>"
if yad_result=$(echo "$info"|yad --width $x_size --height $y_size --html --button='gtk-cancel:1' --button='Download!filesave!Download this publication:0' --filename=/dev/stdin 2>/dev/null); then
download "${db}" "${md5}"
fi
fi
done
}
# LIST VIEWS
list_pager () {
show_preview="$1" # ignored, no preview possible using pager
pager=$(find_tool "$pager_tools")
[[ $pager == "less" ]] && pager_options="-S"
if [[ ${#query_result[@]} -gt 0 ]]; then
(for index in "${!query_result[@]}"; do
echo "${query_result[$index]}"
done)|column -t -n -x -s $'\t'|$pager $pager_options
fi
}
list_dialog () {
show_preview="$1"
if [[ ${#query_result[@]} -gt 0 ]]; then
exec 3>&1
dialog_result=$(dialog --separate-output --no-tags --backtitle "$program - search" --title "$window_title" --checklist "${list_heading}" 0 0 0 -- "${query_result[@]}" 2>&1 1>&3)
dialog_exit=$?
exec 3>&-
clear
if [[ -n $dialog_result ]]; then
if [[ $show_preview -gt 0 ]]; then
# shellcheck disable=SC2086
preview "$db" $dialog_result
else
# shellcheck disable=SC2086
download "$db" $dialog_result
fi
fi
fi
}
# current whiptail (as of the date of writing, 20160326) has a bug which makes it ignore --notags
# https://bugzilla.redhat.com/show_bug.cgi?id=1215239
list_whiptail () {
show_preview="$1"
if [[ ${#query_result[@]} -gt 0 ]]; then
exec 3>&1
whiptail_result=$(whiptail --separate-output --notags --backtitle "$program - search" --title "$window_title" --checklist "${list_heading}" 0 0 0 -- "${query_result[@]}" 2>&1 1>&3)
whiptail_exit=$?
exec 3>&-
clear
if [[ -n $whiptail_result ]]; then
if [[ $show_preview -gt 0 ]]; then
# shellcheck disable=SC2086
preview "$db" $whiptail_result
else
# shellcheck disable=SC2086
download "$db" $whiptail_result
fi
fi
fi
}
list_yad () {
show_preview="$1"
db="${programs[$program]}"
if [[ ${#query_result[@]} -gt 0 ]]; then
# shellcheck disable=SC2086
yad_result=$(yad --width $x_size --height $y_size --separator=" " --title "$program :: ${window_title}" --text "${list_heading}" --list --checklist --dclick-action='bash -c "libgen_preview '"$db"' %s" &' ${yad_columns[$db]} -- "${query_result[@]}" 2>/dev/null)
if [[ -n $yad_result ]]; then
if [[ $show_preview -gt 0 ]]; then
# shellcheck disable=SC2086
preview "$db" $yad_result
else
# shellcheck disable=SC2086
download "$db" $yad_result
fi
fi
fi
}
# zenity does not support the '--' end of options convention leading to problems when the query result contains dashes,
# hence these are replaced by underscores in the query_result.
list_zenity () {
show_preview="$1"
db="${programs[$program]}"
if [[ ${#query_result[@]} -gt 0 ]]; then
# shellcheck disable=SC2086
zenity_result=$(zenity --width $x_size --height $y_size --separator=" " --title "$program :: ${window_title}" --text "${list_heading}" --list --checklist ${zenity_columns[$db]} "${query_result[@]}" 2>/dev/null)
if [[ -n $zenity_result ]];then
if [[ $show_preview -gt 0 ]]; then
# shellcheck disable=SC2086
preview "$db" $zenity_result
else
# shellcheck disable=SC2086
download "$db" $zenity_result
fi
fi
fi
}
# SQL
get_fields () {
# shellcheck disable=SC2086
db="${programs[$program]}"
[[ -z "$show_fields" ]] && show_fields="${pager_columns[$db]}"
IFS=',' read -ra fields <<< "$show_fields"
result=""
for field in "${fields[@]}"; do
[[ ! "${current_fields[*],,}" =~ ${field,,} ]] && exit_with_error "no such field: $field"
table="m"
for category in "${!attribute_columns[@]}"; do
if [[ "${field,,}" =~ ${attribute_columns[$category],,} ]]; then
table="${category:0:1}"
break
fi
done
result+="${result:+,}greatest(${table}.${field}, '-')"
done
echo -n "$result"
}
prepare_sql () {
db="$1"
role="$2"
ui_tool="$3"
# SQL to:
# build filenames...
declare -A sql_filename=(
[libgen]="select concat_ws('.',concat_ws('-', trim(Series), trim(Author), trim(Title), trim(Year), trim(Publisher), trim(language)), trim(extension)) from updated where md5='${md5}' limit 1;"
[libgen_fiction]="select concat_ws('.',concat_ws('-', trim(Series), trim(Author), trim(Title), trim(Year), trim(Publisher), trim(language)), trim(extension)) from fiction where md5='${md5}' limit 1;"
)
declare -A sql_filename_md5=(
[libgen]="select concat_ws('.',concat_ws('-', trim(Series), trim(Author), trim(Title), trim(Year), trim(Publisher), trim(language), trim(md5)), trim(extension)) from updated where md5='${md5}' limit 1;"
[libgen_fiction]="select concat_ws('.',concat_ws('-', trim(Series), trim(Author), trim(Title), trim(Year), trim(Publisher), trim(language), trim(md5)), trim(extension)) from fiction where md5='${md5}' limit 1;"
)
# build directory names...
declare -A sql_dirname=(
[libgen]="select concat_ws('/', trim(language), regexp_replace(trim(topic_descr),'"'[\\\\]+'"','/'), trim(Author), trim(Series)) as dirname from updated as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where md5='${md5}' limit 1;"
[libgen_fiction]="select concat_ws('/', trim(language), trim(Author), trim(Series)) as dirname from fiction where md5='${md5}' limit 1;"
)
# get id
declare -A sql_id=(
[libgen]="select id from updated where md5='${md5}' limit 1;"
[libgen_fiction]="select id from fiction where md5='${md5}' limit 1;"
)
# get extension
declare -A sql_extension=(
[libgen]="select extension from updated where md5='${md5}' limit 1;"
[libgen_fiction]="select extension from fiction where md5='${md5}' limit 1;"
)
# get md5 (used by get_db_for_md5 to determine which database holds a publication)
declare -A sql_md5=(
[libgen]="select md5 from updated where md5='${md5}' limit 1;"
[libgen_fiction]="select md5 from fiction where md5='${md5}' limit 1;"
)
# get attributes
declare -A sql_attributes=(
[libgen]="select $(get_fields) from updated as m left join description as d on m.md5=d.md5 left join topics as t on m.topic=t.topic_id and t.lang='${language}' left join hashes as h on m.md5=h.md5 where m.md5='${md5}' limit 1;"
[libgen_fiction]="select $(get_fields) from fiction as m left join fiction_description as d on m.md5=d.md5 left join fiction_hashes as h on m.md5=h.md5 where m.md5='${md5}' limit 1;"
)
# get hashes
declare -A sql_sha1s=(
[libgen]="select sha1 from hashes where md5='$md5' limit 1;"
[libgen_fiction]="select sha1 from fiction_hashes where md5='$md5' limit 1;"
)
# get ipfs content id hash
declare -A sql_ipfs_cid=(
[libgen]="select ipfs_cid from hashes where md5='${md5}' limit 1;"
[libgen_fiction]="select ipfs_cid from fiction_hashes where md5='${md5}' limit 1;"
)
# preview publication...
declare -A sql_preview_dialog=(
[libgen]="select greatest(Author, '-'), greatest(Title, '-'), greatest(VolumeInfo, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(language, '-'), greatest(filesize, '-'), greatest(extension, '-'), greatest(OpenLibraryID, '-'), greatest(IdentifierWODash, '-'), greatest(ifnull(descr,'-'), '-'), Coverurl from updated left join description on updated.md5=description.md5 where updated.md5='${md5}' limit 1;"
[libgen_fiction]="select greatest(Author, '-'), greatest(Title, '-'), greatest(Issue, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(Language, '-'), greatest(Filesize, '-'), greatest(Extension, '-'), greatest(Identifier,'-'), greatest(Commentary,'-'), greatest(ifnull(descr,'-'), '-'), Coverurl from fiction as f left join fiction_description as d on f.md5=d.md5 where f.md5='${md5}' limit 1;"
)
declare -n sql_preview_whiptail="sql_preview_dialog"
declare -A sql_preview_zenity=(
[libgen]="select greatest(Author, '-'), greatest(Title, '-'), greatest(VolumeInfo, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(language, '-'), greatest(filesize, '-'), greatest(extension, '-'), greatest(OpenLibraryID, '-'), greatest(IdentifierWODash, '-'), greatest(ifnull(descr,'-'), '-'), Coverurl from updated left join description on updated.md5=description.md5 where updated.md5='${md5}' limit 1;"
[libgen_fiction]="select greatest(Author, '-'), greatest(Title, '-'), greatest(Issue, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(language, '-'), greatest(filesize, '-'), greatest(extension, '-'), greatest(Identifier, '-'), greatest(Commentary,'-'), greatest(ifnull(descr,'-'), '-'), Coverurl from fiction as f left join fiction_description as d on f.md5=d.md5 where f.md5='${md5}' limit 1;"
)
declare -A sql_preview_yad=(
[libgen]="select greatest(Author, '-'), greatest(Title, '-'), greatest(VolumeInfo, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(language, '-'), greatest(filesize, '-'), greatest(extension, '-'), greatest(OpenLibraryID, '-'), greatest(IdentifierWODash, '-'), greatest(ifnull(descr,'-'), '-'), Coverurl from updated left join description on updated.md5=description.md5 where updated.md5='${md5}' limit 1;"
[libgen_fiction]="select greatest(Author, '-'), greatest(Title, '-'), greatest(Issue, '-'), greatest(Series, '-'), greatest(Edition, '-'), greatest(Year, '-'), greatest(Publisher, '-'), greatest(language, '-'), greatest(filesize, '-'), greatest(extension, '-'), greatest(Identifier, '-'), greatest(Commentary,'-'), greatest(ifnull(descr,'-'), '-'), Coverurl from fiction as f left join fiction_description as d on f.md5=d.md5 where f.md5='${md5}' limit 1;"
)
# search...
declare -A sql_search_pager=(
[libgen]="select $(get_fields) from updated as m left join description as d on m.md5=d.md5 left join topics as t on m.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit};"
[libgen_fiction]="select $(get_fields) from fiction as m left join fiction_description as d on m.md5=d.md5 where TRUE ${query} ${clauses} ${sql_limit};"
)
declare -A sql_search_dialog=(
[libgen]="select u.MD5, concat_ws('|', rpad(greatest(u.Title,'-'),70,' '), rpad(greatest(u.Author,'-'), 30,' '), rpad(greatest(u.Year,'-'), 5,' '), rpad(greatest(u.Edition,'-'), 20, ' '), rpad(greatest(u.Publisher,'-'), 30,' '), rpad(greatest(u.Language,'-'), 10, ' '), rpad(greatest(ifnull(t.Topic_descr, '-'),'-'),30,' '), rpad(greatest(u.Filesize,'-'),10,' '), rpad(greatest(u.Extension,'-'),6,' ')), 'off' from updated as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit};"
[libgen_fiction]="select f.MD5, concat_ws('|', rpad(greatest(f.Title,'-'),70,' '), rpad(greatest(f.Author,'-'), 30,' '), rpad(greatest(f.Year,'-'), 5,' '), rpad(greatest(f.Edition,'-'), 20, ' '), rpad(greatest(f.Publisher,'-'), 30,' '), rpad(greatest(f.Language,'-'), 10, ' '), rpad(greatest(f.Series,'-'),30,' '), rpad(greatest(f.Filesize,'-'),10,' '), rpad(greatest(f.Extension,'-'),6,' ')), 'off' from fiction as f where TRUE ${query} ${clauses} ${sql_limit};"
)
declare -n sql_search_whiptail="sql_search_dialog"
declare -A sql_search_yad=(
[libgen]="select 'FALSE', u.MD5, left(greatest(u.Title,'-'),70), left(greatest(u.Author, '-'),50),greatest(u.Year, '-'),left(greatest(u.Edition, '-'),20),left(greatest(u.Publisher, '-'),30),greatest(u.language, '-'), left(greatest(ifnull(t.Topic_descr,'-'),'-'),30), greatest(u.Filesize,'-'), greatest(u.Extension, '-'), concat('<b>Title</b>: ',Title, ' <b>Author</b>: ', Author, ' <b>Path</b>: ', Locator) from updated as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit};"
[libgen_fiction]="select 'FALSE', f.MD5, left(greatest(f.Title,'-'),70), left(greatest(f.Author, '-'),50),greatest(f.Year, '-'),left(greatest(f.Edition, '-'),20),left(greatest(f.Publisher, '-'),30),greatest(f.language, '-'), left(greatest(f.Series,'-'),30), greatest(f.Filesize,'-'), greatest(f.Extension, '-'), concat('<b>Title</b>: ',Title, ' <b>Author</b>: ', Author, ' <b>Path</b>: ', Locator) from fiction as f where TRUE ${query} ${clauses} ${sql_limit};"
)
declare -A sql_search_zenity=(
[libgen]="select 'FALSE', u.MD5, left(greatest(u.Title,'-'),70), left(greatest(u.Author, '-'),50),greatest(u.Year, '-'),left(greatest(u.Edition, '-'),20),left(greatest(u.Publisher, '-'),30),greatest(u.language, '-'), left(greatest(ifnull(t.Topic_descr,'-'),'-'),30), greatest(u.Filesize,'-'), greatest(u.Extension, '-') from updated as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit};"
[libgen_fiction]="select 'FALSE', f.MD5, left(greatest(f.Title,'-'),70), left(greatest(f.Author, '-'),50),greatest(f.Year, '-'),left(greatest(f.Edition, '-'),20),left(greatest(f.Publisher, '-'),30),greatest(f.language, '-'), left(greatest(f.Series,'-'),30), greatest(f.Filesize,'-'), greatest(f.Extension, '-') from fiction as f where TRUE ${query} ${clauses} ${sql_limit};"
)
declare -A sql_search_all_pager=(
[libgen]="(select u.Title, u.Author,u.Year,u.Edition,u.Publisher,u.language,ifnull(t.Topic_descr, '-'), u.Filesize, u.Extension, u.Locator, md5 from updated as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit}) UNION (select u.Title, u.Author,u.Year,u.Edition,u.Publisher,u.language,t.Topic_descr,u.Filesize, u.Extension, u.Locator, u.md5 from updated_edited as u left join topics as t on u.topic=t.topic_id and t.lang='${language}' where TRUE ${query} ${clauses} ${sql_limit});"
)
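# the nameref below selects one of the statement sets above by role and ui tool,
# e.g. role=search + ui_tool=yad resolves to sql_search_yad[$db], while role=id
# (no ui tool) resolves to sql_id[$db]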
if [[ -n $ui_tool ]]; then
sql_source="sql_${role}_${ui_tool}"
else
sql_source="sql_${role}"
fi
echo "${sql_source[$db]}"
}
# UTILITY FUNCTIONS
get_db_for_md5 () {
md5="$1"
db=""
for dbs in "${!tables[@]}"; do
fmd5=$(get_attr "md5" "$dbs" "$md5")
if [[ "$fmd5" == "$md5" ]]; then
db=$dbs
fi
done
if [[ -n "$db" ]]; then
echo -n "$db"
fi
}
get_torrent_filename () {
db="$1"
md5="$2"
absolute="$3"
echo -n "${absolute:+$target_directory/${torrent_directory:+$torrent_directory/}}${torrentprefix[$db]}_$(get_torrent "${db}" "${md5}").torrent"
}
create_symlinks () {
basedir="$(dirname "$0")"
sourcefile="$(readlink -e "$0")"
for name in "${!programs[@]}"; do
if [[ ! -e "$basedir/$name" ]]; then
ln -s "$sourcefile" "$basedir/$name"
fi
done
exit
}
check_settings () {
# does target directory exist?
[[ ! -d "$target_directory" ]] && exit_with_error "target_directory $target_directory does not exist";
# when defined, does torrent download directory exist?
[[ -n "$torrent_download_directory" && ! -d "$torrent_download_directory" ]] && exit_with_error "torrent_download_directory $torrent_download_directory does not exist";
# when defined, does torrent helper script exist?
if [[ -n "$use_torrent" ]]; then
if [[ -z "$torrent_tools" ]]; then
exit_with_error "-u needs torrent helper script, see \$torrent_tools"
elif ! find_tool "$torrent_tools" >/dev/null; then
exit_with_error "-u: torrent helper script ($torrent_tools) not found"
fi
fi
if [[ "$use_torrent" && "$use_ipfs" ]]; then
exit_with_error "can not use_torrent and use_ipfs at the same time, check $config"
fi
}
cleanup () {
if [[ $ui_tool == "whiptail" ]]; then
reset
fi
}
# HELP
help () {
echo "$(basename "$(readlink -f "$0")")" "version $version"
cat <<- 'EOF'
Use: books OPTIONS [like] [<PATTERN>]
[B]ooks - which is only one of the names this program goes by - is a
front-end for accessing a locally accessible libgen / libgen_fiction database
instance, offering versatile search and download directly from the command
line. The included update_libgen tool is used to keep the database up to date -
if the database is older than a user-defined value it is updated before the
query is executed. This generally only takes a few seconds, but it might take
longer on a slow connection or after a long update interval. Updating can be
temporarily disabled by using the -x command line option. To refresh the
database(s) from a dump file use the included refresh_libgen program.
When books, nbook and/or xbook are regularly used the database should be kept
up to date automatically. In that case it is only necessary to use
refresh_libgen to refresh the database when you get a warning from
update_libgen about unknown columns in the API response.
If the programs have not been used for a while it can take a long time - and a
lot of data transfer - to update the database through the API (which is what
update_libgen does). Especially when using the compact database it can be
quicker to use refresh_libgen to just pull the latest dump instead of waiting
for update_libgen to do its job.
The fiction database cannot be updated through the API (yet), so for
that database refresh_libgen is currently the canonical way to get the latest
version.
SEARCH BY FIELD:
This is the default search mode. If no field options are given this searches
the Title field for the PATTERN. Search uses partial matching by default, use
-X for matching words starting with PATTERN, -XX to match words which end with
PATTERN and -XXX for exact matching.
FULLTEXT SEARCH (-f):
Performs a pattern match search over all fields indicated by the options. If no
field options are given, perform a pattern match search over the Author and
Title fields.
Depending on which name this program is executed under it behaves differently:
books: query database and show results, direct download with md5
books-all: query database and show results (exhaustive search over all tables, slow)
nbook: select publications for download from list (terminal-based)
xbook: select publications for download from list (GUI)
fiction: query database and show results (using 'fiction' database), direct download with md5
nfiction: select publications for download from list (terminal-based, use 'fiction' database)
xfiction: select publications for download from list (GUI, use 'fiction' database)
OPTIONS
EOF
for key in "${!schema[@]}"; do
echo " -${key} search on ${schema[$key]^^}"
done
cat <<- 'EOF'
-f fulltext search
searches for the given words in the fields indicated by the other options.
when no other options are given this will perform a pattern match search
for the given words over the Author and Title fields.
-X search for fields starting with PATTERN
-XX search for fields ending with PATTERN
-XXX search for fields exactly matching PATTERN
-w preview publication info before downloading (cover preview only in GUI tools)
select one or more publications to preview and press enter/click OK.
double-clicking a result row also shows a preview irrespective of this option,
but this only works when using the yad gui tool
-= DIR set download location to DIR
-$ use extended path when downloading:
nonfiction/[topic/]author[/series]/title
fiction/language/author[/series]/title
-u BOOL use bittorrent (-u 1 or -u y) or direct download (-u 0 or -u n)
this parameter overrides the default download method
bittorrent download depends on an external helper script
to interface with a bittorrent client
-I BOOL use ipfs (-I 1 or -I y) or direct download (-I 0 or -I n)
this parameter overrides the default download method
ipfs download depends on a functioning ipfs gateway.
default gateway is hosted by Cloudflare, see https://ipfs.io/
for instructions on how to run a local gateway
-U MD5 print torrent path (torrent#/md5) for given MD5
-j MD5 print filename for given MD5
-J MD5 download file for given MD5
can be combined with -u to download with bittorrent
-M MD5 fast path search on md5, only works in _books_ and _fiction_
can be combined with -F FIELDS to select fields to be shown
output goes directly to the terminal (no pager)
-F FIELDS select which fields to show in pager output
-# LIMIT limit search to LIMIT hits (default: 1000)
-x skip database update
(currently only the 'libgen' database can be updated)
-@ use torsocks to connect to the libgen server(s). You'll need to install
torsocks before using this option; try this in case your ISP
(or a transit provider somewhere en-route) blocks access to libgen
-k install symlinks for all program invocations
-h show this help message
EXAMPLES
Do a pattern match search on the Title field for 'ilias' and show the results in the terminal
$ books like ilias
Do an exact search on the Title field for 'The Odyssey' and show the results in the terminal
$ books 'the odyssey'
Do an exact search on the Title field for 'The Odyssey' and the Author field for 'Homer', showing
the result in the terminal
$ books -X -t 'The Odyssey' -a 'Homer'
Do the same search as above, showing the results in a list on the terminal with checkboxes to select
one or more publications for download
$ nbook -X -t 'The Odyssey' -a 'Homer'
A case-insensitive pattern search using an X11-based interface; use bittorrent (-u) when downloading files
$ xbook -u y -t 'the odyssey' -a 'homer'
Do a fulltext search over the Title, Author, Series, Periodical and Publisher fields, showing the
results in a terminal-based checklist for download after preview (-w)
$ nbook -w -f -t -a -s -r -p 'odyssey'
Walk over a directory of publications, compute md5 and use this to generate file names:
$ find /path/to/publications -type f|while read f; do books -j $(md5sum "$f"|awk '{print $1}');done
As above, but print torrent number and path in torrent file
$ find /path/to/publications -type f|while read f; do books -U $(md5sum "$f"|awk '{print $1}');done
Find publications by author 'thucydides' and show their md5,title and year in the terminal
$ books -a thucydides -F md5,title,year
Get data on a single publication using fast path MD5 search, show author, title and extension
$ books -M 51b4ee7bc7eeb6ed7f164830d5d904ae -F author,title,extension
Download a publication using its MD5 (-J MD5), using IPFS (-I y) to download
$ books -I y -J 51b4ee7bc7eeb6ed7f164830d5d904ae
EOF
}
main "$@"