Python script to extract all images from a given URL

This Python script extracts all images from a given URL and stores them on the local hard drive, where you can extract metadata and gather information about the target. Before running it, install the BeautifulSoup and mechanize third-party modules (the script uses a mechanize browser object to fetch pages).

#!/usr/bin/python
# Requires the BeautifulSoup (v3) and mechanize third-party modules.

from BeautifulSoup import BeautifulSoup
import mechanize
import os
import optparse

def mirrorImages(url, dir):
    # Browse to the target page and collect every <img> tag.
    ab = mechanize.Browser()
    html = ab.open(url)
    soup = BeautifulSoup(html)
    image_tags = soup.findAll('img')

    for image in image_tags:
        # Build a flat local filename from the image URL.
        filename = image['src']
        if filename.startswith('http://'):
            filename = filename[len('http://'):]
        filename = os.path.join(dir, filename.replace('/', '_'))
        print '[+] Saving ' + str(filename)
        # Download the image data and write it to disk.
        data = ab.open(image['src']).read()
        ab.back()
        save = open(filename, 'wb')
        save.write(data)
        save.close()

def main():
    parser = optparse.OptionParser('usage %prog ' +
                                   '-u <target url> -d <destination directory>')
    parser.add_option('-u', dest='tgtURL', type='string',
                      help='specify target url')
    parser.add_option('-d', dest='dir', type='string',
                      help='specify destination directory')

    (options, args) = parser.parse_args()

    url = options.tgtURL
    dir = options.dir

    if url == None or dir == None:
        print parser.usage
        exit(0)
    else:
        try:
            mirrorImages(url, dir)
        except Exception, e:
            print '[-] Error Mirroring Images.'
            print '[-] ' + str(e)

if __name__ == '__main__':
    main()
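
The listing above targets Python 2 and the old BeautifulSoup 3 API. As a point of comparison, here is a minimal modern sketch of the same technique, assuming Python 3 with the third-party requests and beautifulsoup4 packages installed; the script and directory names used below are placeholders, and the urljoin() call also resolves relative src attributes, which the original script does not handle.

#!/usr/bin/env python3
# Minimal sketch: mirror every <img> from a page using requests + bs4.
# Assumes Python 3 with requests and beautifulsoup4 installed.

import os
import sys
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def mirror_images(url, dest_dir):
    os.makedirs(dest_dir, exist_ok=True)
    html = requests.get(url, timeout=10).text
    soup = BeautifulSoup(html, 'html.parser')

    for img in soup.find_all('img', src=True):
        src = urljoin(url, img['src'])  # resolve relative image URLs
        filename = os.path.join(dest_dir,
                                src.split('://', 1)[-1].replace('/', '_'))
        print('[+] Saving ' + filename)
        with open(filename, 'wb') as fh:
            fh.write(requests.get(src, timeout=10).content)

if __name__ == '__main__':
    # e.g. python3 mirror_images3.py http://www.example.com images
    mirror_images(sys.argv[1], sys.argv[2])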

Usage:

chmod +x script_name

./script_name -u <target url> -d <directory to save the images>
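
The introduction mentions extracting metadata from the mirrored images. Here is a minimal sketch of that follow-up step, assuming the Pillow package is installed; it walks the download directory (the 'images' default below is just an example value for -d) and prints any EXIF tags it finds, such as the camera model or capture time.

#!/usr/bin/env python3
# Minimal sketch: print EXIF metadata from the mirrored images.
# Assumes the Pillow package is installed; 'images' is an example directory.

import os
import sys
from PIL import Image
from PIL.ExifTags import TAGS

def print_exif(directory):
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        try:
            exif = Image.open(path).getexif()  # empty for formats without EXIF
        except Exception:
            continue                           # skip files Pillow cannot open
        if not exif:
            continue
        print('[+] ' + name)
        for tag_id, value in exif.items():
            # Map numeric EXIF tag IDs to readable names (e.g. Model, DateTime).
            print('    %s: %s' % (TAGS.get(tag_id, tag_id), value))

if __name__ == '__main__':
    print_exif(sys.argv[1] if len(sys.argv) > 1 else 'images')

Note that EXIF data usually survives only in JPEGs that the hosting site has not stripped or re-encoded, so many mirrored images will yield nothing.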
