rebuild_tags(): use array for more robust file handling
parent e3cf406bd1
commit a674ec5837
1 changed file with 19 additions and 14 deletions
bb.sh | 33 +++++++++++++++++++--------------
@@ -729,8 +729,8 @@ tags_in_post() {
 }
 
 # Finds all posts referenced in a number of tags.
-# Arguments are tags
-# Prints one line with space-separated tags to stdout
+# Arguments are tags.
+# Prints file names to stdout, one per line.
 posts_with_tags() {
     (($# < 1)) && return
     set -- "${@/#/$prefix_tags}"
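As context for the unchanged `set -- "${@/#/$prefix_tags}"` line: `${@/#/$prefix_tags}` is the standard prepend idiom, where a pattern beginning with `#` must match at the start of each parameter, so the empty pattern matches the empty prefix and the replacement string is prepended to every argument. A minimal standalone sketch (the `tag_` value is illustrative; bb.sh takes it from its `$prefix_tags` config variable):

    prefix_tags=tag_
    set -- linux bash
    set -- "${@/#/$prefix_tags}"   # prepend the prefix to every positional parameter
    printf '%s\n' "$@"             # prints: tag_linux, tag_bash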
@@ -748,31 +748,35 @@ posts_with_tags() {
 # rebuild_tags "one_post.html another_article.html" "example-tag another-tag"
 # mind the quotes!
 rebuild_tags() {
-    if (($# < 2)); then
+    local IFS=$'\n' # word splitting only on newline; make $* expand with newline as separator
+    if (($# < 1)); then
         # will process all files and tags
-        files=$(ls -t ./*.html)
+        files=( $(ls -t ./*.html) )
         all_tags=yes
     else
         # will process only given files and tags
-        files=$(printf '%s\n' $1 | sort -u)
-        files=$(ls -t $files)
-        tags=$2
+        for ((i=1; i<=$#; i++)); do
+            [[ ${!i} == --tags ]] && break
+        done
+        files=( $(ls -t $(sort -u <<< "${*:1:$((i-1))}")) )
+        tags=( "${@:$((i+1)):$#}" )
+        all_tags=''
     fi
     echo -n "Rebuilding tag pages "
     n=0
     if [[ -n $all_tags ]]; then
-        rm ./"$prefix_tags"*.html &> /dev/null
+        rm -f ./"$prefix_tags"*.html
     else
-        for i in $tags; do
-            rm "./$prefix_tags$i.html" &> /dev/null
+        for i in "${tags[@]}"; do
+            rm -f "./$prefix_tags$i.html"
         done
     fi
     # First we will process all files and create temporal tag files
     # with just the content of the posts
     tmpfile=tmp.$RANDOM
     while [[ -f $tmpfile ]]; do tmpfile=tmp.$RANDOM; done
-    while IFS='' read -r i; do
-        is_boilerplate_file "$i" && continue;
+    for i in "${files[@]}"; do
+        is_boilerplate_file "$i" && continue
         echo -n "."
         if [[ -n $cut_do ]]; then
             get_html_file_content 'entry' 'entry' 'cut' <"$i" | awk "/$cut_line/ { print \"<p class=\\\"readmore\\\"><a href=\\\"$i\\\">$template_read_more</a></p>\" ; next } 1"
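The new parsing implies a changed calling convention: instead of two space-separated strings, the function now takes one flat argument list with a `--tags` sentinel, so a targeted rebuild would presumably be invoked as `rebuild_tags one_post.html another_article.html --tags example-tag another-tag`, and a bare `rebuild_tags` rebuilds everything. The loop finds the sentinel via indirect expansion (`${!i}` expands the positional parameter whose number is in `i`) and then slices the list around it; with `IFS` locally set to a newline, `"${*:...}"` joins its slice with newlines and unquoted expansions split only on newlines, which is what lets file names containing spaces survive the `sort -u` and `ls -t` round trip (file names containing newlines still would not). A minimal sketch of the splitting alone, with hypothetical sample arguments (the real function additionally de-duplicates and mtime-sorts the file list):

    #!/usr/bin/env bash
    # Locate the --tags sentinel among the positional parameters,
    # then slice the argument list around it.
    set -- one_post.html another_article.html --tags example-tag another-tag
    for ((i=1; i<=$#; i++)); do
        [[ ${!i} == --tags ]] && break   # ${!i} expands parameter number i
    done
    files=( "${@:1:$((i-1))}" )          # everything before --tags
    tags=( "${@:$((i+1)):$#}" )          # everything after --tags
    printf 'file: %s\n' "${files[@]}"
    printf 'tag:  %s\n' "${tags[@]}"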
@@ -780,11 +784,12 @@ rebuild_tags() {
             get_html_file_content 'entry' 'entry' <"$i"
         fi >"$tmpfile"
         for tag in $(tags_in_post "$i"); do
-            if [[ -n $all_tags || " $tags " == *" $tag "* ]]; then
+            # if either all tags or array tags[] contains $tag...
+            if [[ -n $all_tags || $'\n'"${tags[*]}"$'\n' == *$'\n'"$tag"$'\n'* ]]; then
                 cat "$tmpfile" >> "$prefix_tags$tag".tmp.html
             fi
         done
-    done <<< "$files"
+    done
     rm "$tmpfile"
     # Now generate the tag files with headers, footers, etc
     while IFS='' read -r i; do
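The rewritten containment test swaps the old space delimiters for newlines: `"${tags[*]}"` joins the array with the first character of `IFS` (a newline here), and wrapping both the joined string and the candidate in `$'\n'` makes the glob match only whole entries, so the test stays correct even if a tag itself contains a space. A standalone sketch of the idiom, with made-up tag names:

    #!/usr/bin/env bash
    IFS=$'\n'
    tags=( linux "open source" bash )
    tag="open source"
    # Join the array with newlines and look for the candidate as a whole line.
    if [[ $'\n'"${tags[*]}"$'\n' == *$'\n'"$tag"$'\n'* ]]; then
        echo "found"
    fi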