#!/bin/bash
# Scrape CTV News' /london section (via the Queryly search API) and harvest
# outbound links to london.ca / pub-london.escribemeetings.com from each
# article — candidates for link-rot checking. Links accumulate in
# masterlinks.txt; per-batch and per-article scratch files live in $WORKDIR.

# Browser-like user agent so article pages are served normally.
WGET_UA="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Viewer/99.9.8782.87"

echo "Scraping CTV for London link rot."

WORKDIR="./tmp"
# -p: don't fail or complain when the scratch dir already exists (re-runs).
mkdir -p "$WORKDIR"

SEARCH_END="FALSE"   # flipped to TRUE once we page back past 2019 (or run out of items)
BATCH_SIZE=100       # Queryly page size per request
i=0                  # absolute index of the first article in the current batch
# Page through the Queryly results $BATCH_SIZE articles at a time, newest
# first, until we reach articles published before 2019 (or run out of items).
while [[ "$SEARCH_END" == "FALSE" ]]; do
	# Fetch one batch of search results into a per-batch JSON file.
	# -s silences the progress meter; -X GET was a no-op and is dropped.
	curl -s \
		-H "Content-Type: application/json" \
		'https://api.queryly.com/json.aspx?queryly_key=8eea35fe3eed49d5&query={"includeSections":"/london"}&endindex='"$i"'&batchsize='"$BATCH_SIZE" \
		| jq > "$WORKDIR/tmp_$i.json"

	j=0
	while (( j < BATCH_SIZE )); do
		ARTICLE_TIMESTAMP=$(jq ".items[$j].pubdateunix" "$WORKDIR/tmp_$i.json")
		# A short (final) batch yields null entries; previously this crashed
		# 'date' and the arithmetic test — now it ends paging cleanly.
		if [[ -z "$ARTICLE_TIMESTAMP" || "$ARTICLE_TIMESTAMP" == "null" ]]; then
			echo "No more articles in this batch; stopping."
			SEARCH_END="TRUE"
			break
		fi

		echo "Cur. article: $(( i + j )), date: $(jq ".items[$j].pubdate" "$WORKDIR/tmp_$i.json")"
		ARTICLE_YEAR=$(date -d "@$ARTICLE_TIMESTAMP" +%Y)
		# -r emits the raw string (no quotes), replacing the sed 's/"//g' hack.
		ARTICLE_URL=$(jq -r ".items[$j].link" "$WORKDIR/tmp_$i.json")

		if (( ARTICLE_YEAR >= 2019 )); then
			echo "$ARTICLE_URL"
			wget --user-agent="$WGET_UA" "https://ctvnews.ca$ARTICLE_URL" -O "$WORKDIR/tmp.html" -q
			# curlinks.txt is truncated per article (header line), then the
			# article's london.ca / escribemeetings links are appended.
			# NOTE(review): the \\" patterns match backslash-escaped quotes —
			# presumably hrefs embedded inside JSON in the page source; confirm.
			echo ">>> https://ctvnews.ca$ARTICLE_URL" > "$WORKDIR/curlinks.txt"
			grep -o 'href=\\"[^\\"]*\\"' "$WORKDIR/tmp.html" \
				| sed 's/href=\\"//; s/\\"//' \
				| grep -e 'https://london.ca/' -e 'https://www.london.ca/' -e 'http://london.ca/' -e 'http://www.london.ca/' -e 'pub-london.escribemeetings.com' \
				>> "$WORKDIR/curlinks.txt"
			echo "===========================================================" >> masterlinks.txt
			echo "==========================================================="
			# Append this article's links to the running master list, echoing
			# each one to the console as well.
			while IFS= read -r LINE; do
				echo "Link: $LINE" >> masterlinks.txt
				echo "Link: $LINE"
			done < "$WORKDIR/curlinks.txt"
		else
			echo "Reached 2019, no longer gathering links."
			SEARCH_END="TRUE"
			# Stop immediately; previously the loop kept iterating over the
			# rest of the batch, printing this message for every stale item.
			break
		fi
		j=$(( j + 1 ))
	done

	i=$(( i + BATCH_SIZE ))

	# Be polite to the API between pages.
	sleep 3
done
