#1. Batch-download every page of the HMDB metabolite list:
for i in {1..46};
do
curl -s "https://hmdb.ca/hml/metabolites?page=${i}" -o page${i};
sleep 15;
done
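# Optional follow-up (a sketch, not part of the original workflow): check that all 46
# pages were actually saved and are non-empty, and re-download any page that failed.
for i in {1..46};
do
if [ ! -s page${i} ]; then
echo "page${i} missing or empty, re-downloading";
curl -s "https://hmdb.ca/hml/metabolites?page=${i}" -o page${i};
sleep 15;
fi
done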
#2. Parse the local page files to get a metabolite id-name dictionary
for i in {1..46};
do
cat page${i} \
| grep HMDB \
| sed 's#class="metabolite-name\">#\n#g' \
| grep HMDB \
| sed '1d' | sed '$d' \
| sed 's/<strong>//g' \
| awk '{match($0, /HMDB[0-9]{7}/, arr);   # 3-argument match() is a gawk extension
hmdb_id = arr[0];
match($0, /[^>]*>([^<]+)/, arr);
compound_name = arr[1];
if (length(hmdb_id) && length(compound_name)) print hmdb_id"\t"compound_name}' > page${i}.dict;
done
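# Optional sanity check (illustrative only; the merged file all_pages.dict is an assumed
# name and is not used by the later steps): count entries per page and merge the
# per-page dictionaries into a single id-name table.
wc -l page*.dict
cat page*.dict | sort -u > all_pages.dict
head all_pages.dict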
#3. Building on the previous step, batch-download each metabolite's detailed record
cat page*.dict |while IFS=$'\t' read -r id name
do
curl -s https://hmdb.ca/metabolites/${id}.xml -o ${id}.xml
sleep 10 # take a short rest between requests; don't overwork the server
done
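# Optional retry pass (a sketch, assuming a failed download leaves a missing or empty file):
# re-fetch any metabolite XML that did not arrive intact before moving on to parsing.
cat page*.dict | while IFS=$'\t' read -r id name
do
if [ ! -s "${id}.xml" ]; then
echo "# ${id} missing, retrying"
curl -s "https://hmdb.ca/metabolites/${id}.xml" -o "${id}.xml"
sleep 10
fi
done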
#4. Batch-parse the XML files to build a metabolite-pathway dictionary
cat page*.dict |while IFS=$'\t' read -r id name
do
if test -z "$(cat ${id}.xml|xmllint --xpath "//pathway/name" - 2>/dev/null)"; then
echo "# ${id} skiped."
else
echo "# ${id} running" &&\
cat ${id}.xml \
| xmllint --xpath "//pathway/name | //pathway/smpdb_id | //pathway/kegg_map_id" - \
| awk -F'[<>]' -v HMDB_ID="$id" -v HMDB_NM="$name" '{
# each pathway yields three tag lines (name, smpdb_id, kegg_map_id); collect them and print one TSV row per pathway
if (FNR % 3 == 1) name = ($3 ? $3 : "NA");
if (FNR % 3 == 2) smpdb_id = ($3 ? $3 : "NA");
if (FNR % 3 == 0) kegg_map_id = ($3 ? $3 : "NA");
if (FNR % 3 == 0) printf("%s\t%s\t%s\t%s\t%s\n", HMDB_ID, HMDB_NM, name, smpdb_id, kegg_map_id);
}' >> Meta2Pathway.tsv
fi
done
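# Quick check of the result (illustrative only): Meta2Pathway.tsv holds five tab-separated
# columns: HMDB ID, compound name, pathway name, SMPDB ID, KEGG map ID.
head -n 5 Meta2Pathway.tsv
# Count how many pathway rows were recorded per metabolite.
cut -f1 Meta2Pathway.tsv | sort | uniq -c | sort -rn | head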