#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Convert XML sitemaps to a plain text URL list and check robots.txt
#
# Copyright (C) 2010,2011  Bartek thindil Jasicki
#
# This file is part of Grub.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
###########################################################################
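#
# The script walks every *.xml.gz file in ./sitemaps/, unpacks it, reads the
# <loc> element of each <url> entry, checks the URL against the site's
# robots.txt, and appends the allowed plain-HTTP URLs (with the leading
# "http://" stripped) to ./urls.txt.  A sketch of the expected input,
# assuming the standard sitemaps.org layout (only <url>/<loc> is read):
#
#   <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
#     <url>
#       <loc>http://example.com/page.html</loc>
#     </url>
#   </urlset>
#
# which, if robots.txt allows it, yields the line "example.com/page.html"
# in urls.txt.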

import os, gzip, robotparser, httplib
from xml.dom import minidom

directory = './sitemaps/'
badext = ['.pdf', '.mov', '.mp3', '.jpg', '.mpg', '.doc', '.exe', '.wmv', '.m4v',
          '.swf', '.flv', '.bin', '.zip', '.dmg', '.ppt', '.dll', '.wav', '.avi',
          '.gif', '.iso', '.tgz', '.bmp', '.svg', '.ico', '.pbm']
useragent = 'GrubNG 0.1 (http://grub.org)'
entries = os.listdir(directory)
oldroboturl = ''
available = False
for entry in entries:
    if os.path.isfile(directory + entry):
        if entry.endswith(".xml.gz"):
            # decompress the gzipped sitemap
            fin = gzip.open(directory + entry, "rb")
            fout = open(directory + entry[0:-3], "wb")
            fout.writelines(fin)
            fout.close()
            fin.close()
            # extract URLs, check robots.txt and append the allowed ones to a text file
            xmldoc = minidom.parse(directory + entry[0:-3])
            urlslist = xmldoc.getElementsByTagName('url')
            fout = open("urls.txt", "ab");
            for url in urlslist:
                link = url.getElementsByTagName('loc')
                # skip HTTPS links
                if 'https://' in link[0].firstChild.data:
                    continue
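                # the first '/' after the 7 characters of 'http://' marks the end
                # of the host part; robots.txt always sits at that host's root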
                index = link[0].firstChild.data.find('/', 7)
                if index > -1:
                    roboturl = link[0].firstChild.data[:index] + '/robots.txt'
                else:
                    roboturl = link[0].firstChild.data + '/robots.txt'
                # fetch robots.txt again only when it differs from the previous one
                if roboturl != oldroboturl:
                    robot = robotparser.RobotFileParser()
                    robot.set_url(roboturl)
                    try:
                        robot.read()
                    except IOError:
                        continue
                    except httplib.InvalidURL:
                        continue
                    oldroboturl = roboturl
                # if robots.txt allows fetching this link, add it to the output file
                try:
                    available = robot.can_fetch(useragent, link[0].firstChild.data)
                except KeyError:
                    continue
                except httplib.InvalidURL:
                    continue
                if available:
                    urls2 = link[0].firstChild.data.replace('http://', '')
                    if urls2[-4:].lower() in badext:
                        continue
                    fout.write(urls2 + "\n")
            fout.close()
            os.remove(directory + entry[0:-3])
        os.remove(directory + entry)
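
# Typical run (a sketch implied by the code above): put the gzipped sitemap
# files into ./sitemaps/ next to this script and run it with Python 2; the
# allowed URLs accumulate in ./urls.txt and the processed sitemap files are
# deleted afterwards.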