Merge lp:~cjohnston/uwn/uwn into lp:uwn

Proposed by Chris Johnston
Status: Merged
Merged at revision: 2
Proposed branch: lp:~cjohnston/uwn/uwn
Merge into: lp:uwn
Diff against target: 816 lines (+776/-0)
8 files modified
security-and-updates/ArchiveChanges.py (+125/-0)
security-and-updates/Dapper.py (+90/-0)
security-and-updates/Hardy.py (+90/-0)
security-and-updates/Intrepid.py (+90/-0)
security-and-updates/Jaunty.py (+90/-0)
security-and-updates/Karmic.py (+90/-0)
security-and-updates/Lucid.py (+91/-0)
security-and-updates/Security.py (+110/-0)
To merge this branch: bzr merge lp:~cjohnston/uwn/uwn
Reviewer Review Type Date Requested Status
Chris Johnston (community) Approve
Review via email: mp+27570@code.launchpad.net

Description of the change

Adds security and update scripts

To post a comment you must log in.
Revision history for this message
Chris Johnston (cjohnston) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added directory 'security-and-updates'
2=== added file 'security-and-updates/ArchiveChanges.py'
3--- security-and-updates/ArchiveChanges.py 1970-01-01 00:00:00 +0000
4+++ security-and-updates/ArchiveChanges.py 2010-06-15 00:39:26 +0000
5@@ -0,0 +1,125 @@
6+#!/usr/bin/env python
7+###
8+# Copyright (c) 2009 Nick Ali
9+#
10+# This program is free software; you can redistribute it and/or modify
11+# it under the terms of version 2 of the GNU General Public License as
12+# published by the Free Software Foundation.
13+#
14+# This program is distributed in the hope that it will be useful,
15+# but WITHOUT ANY WARRANTY; without even the implied warranty of
16+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17+# GNU General Public License for more details.
18+#
19+###
20+
21+###
22+# To use this program open up a terminal and cd into the directory that it's in.
23+# So if this is your desktop, "cd ./Desktop". Then "python ArchiveChanges.py".
24+# In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
25+
26+import urllib2
27+import sys
28+import datetime
29+from BeautifulSoup import BeautifulSoup
30+from getopt import getopt, GetoptError
31+
32+def useage():
33+ print """
34+python ArchiveChanges.py [-h] [-s] [-r <release>] [-m <month>] [-y <year>]
35+options:
36+ -r <release> Release name to process; eg. Dapper, Hardy, Jaunty, Karmic, Lucid
37+ -m <month> Month of Ubuntu Updates to process; eg. January, February, March ...
38+ -y <year> Year of Ubuntu Updates to process; eg. 2009, 2010, ...
39+ -s Process Security Updates
40+ -n <num> Max number of updates to print; defaults to 3000
41+ -h Print this help menu
42+
43+examples:
44+
45+ python ArchiveChanges.py -h
46+
47+ python ArchiveChanges.py -r Lucid -m April -y 2010
48+
49+ python ArchiveChanges.py -s
50+"""
51+ exit(1)
52+
53+# Default values
54+release = "lucid"
55+today = datetime.date.today()
56+month = today.strftime("%B")
57+year = today.strftime("%Y")
58+security = False
59+maxItems = False
60+
61+try:
62+ opts, args = getopt(sys.argv[1:], 'hsr:m:y:n:')
63+ for opt, val in opts:
64+ if opt == '-r':
65+ release = val.lower()
66+ continue
67+ elif opt == '-m':
68+ month = val.title()
69+ continue
70+ elif opt == '-y':
71+ year = val
72+ continue
73+ elif opt == '-s':
74+ security = True
75+ break
76+ elif opt == '-n':
77+ maxItems = int(val)
78+ continue
79+ elif opt == '-h':
80+ print useage()
81+
82+except GetoptError:
83+ print useage()
84+
85+url ="https://lists.ubuntu.com/archives/%s-changes/%s-%s/thread.html" % (release, year, month)
86+
87+if security:
88+ url = "http://www.ubuntu.com/usn"
89+
90+#grab the mailing page
91+currentItems = 0
92+mlPage = urllib2.urlopen(url)
93+soup = BeautifulSoup(mlPage)
94+
95+title = soup.find('title')
96+print '-------- Start ' + title.string + ' --------------'
97+
98+baseUrl = url[:url.rfind('/')]
99+body = soup.html.body
100+for li in body.findAll('li'):
101+ if maxItems and (currentItems >= maxItems):
102+ break
103+ ahref = li.find('a')
104+ # Only deal with valid links
105+ if ahref != None:
106+ packageName = ''
107+ # Strip out the [ubuntu/....
108+ if ahref.string.startswith('[ubuntu'):
109+ closingBracket = ahref.string.find(']');
110+ packageName = ahref.string[closingBracket+2:].rstrip()
111+ elif ahref.string.startswith('USN'):
112+ packageName = ahref.string
113+
114+ if len(packageName) > 1:
115+ # Strip out the (Accepted)
116+ whereSource = packageName.find('(Accepted')
117+ if whereSource > 0 :
118+ packageName = packageName[:whereSource]
119+
120+ # If there are multiple packages, just use the first one
121+ extraNames = packageName.find(',')
122+ if extraNames != -1:
123+ packageName = packageName[:extraNames]
124+
125+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
126+ currentItems = currentItems + 1
127+print '-------- End ' + title.string + ' --------------\n'
128+
129+
130+
131
132=== added file 'security-and-updates/Dapper.py'
133--- security-and-updates/Dapper.py 1970-01-01 00:00:00 +0000
134+++ security-and-updates/Dapper.py 2010-06-15 00:39:26 +0000
135@@ -0,0 +1,90 @@
136+#!/usr/bin/env python
137+###
138+# Copyright (c) 2009 Nick Ali
139+#
140+# This program is free software; you can redistribute it and/or modify
141+# it under the terms of version 2 of the GNU General Public License as
142+# published by the Free Software Foundation.
143+#
144+# This program is distributed in the hope that it will be useful,
145+# but WITHOUT ANY WARRANTY; without even the implied warranty of
146+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
147+# GNU General Public License for more details.
148+#
149+###
150+
151+###
152+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
153+
154+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
155+###
156+
157+import urllib2
158+import sys
159+from BeautifulSoup import BeautifulSoup
160+
161+### Variables
162+#Mailing list URLs to grab data from. Add the urls here.
163+
164+###
165+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
166+###
167+
168+httpUrl = [
169+#'http://www.ubuntu.com/usn',
170+'https://lists.ubuntu.com/archives/dapper-changes/2010-June/thread.html',
171+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
172+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
173+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
174+#'https://lists.ubuntu.com/archives/jaunty-changes/2009-April/'
175+]
176+### You should not have to change anything under this line ###
177+
178+maxItems = 3000
179+if len(sys.argv) > 1:
180+ maxItems = int(sys.argv[1])
181+
182+
183+#grab the mailing page
184+for currentUrl in httpUrl:
185+ currentItems = 0
186+ mlPage = urllib2.urlopen(currentUrl)
187+ soup = BeautifulSoup(mlPage)
188+
189+ title = soup.find('title')
190+ print '-------- Start ' + title.string + ' --------------'
191+
192+ baseUrl = currentUrl[:currentUrl.rfind('/')]
193+ body = soup.html.body
194+
195+ for li in body.findAll('li'):
196+ if currentItems >= maxItems:
197+ break
198+ ahref = li.find('a')
199+ # Only deal with valid links
200+ if ahref != None:
201+ packageName = ''
202+ # Strip out the [ubuntu/....
203+ if ahref.string.startswith('[ubuntu'):
204+ closingBracket = ahref.string.find(']');
205+ packageName = ahref.string[closingBracket+2:].rstrip()
206+ elif ahref.string.startswith('USN'):
207+ packageName = ahref.string
208+
209+ if len(packageName) > 1:
210+ # Strip out the (Accepted)
211+ whereSource = packageName.find('(Accepted')
212+ if whereSource > 0 :
213+ packageName = packageName[:whereSource]
214+
215+ # If there are multiple packages, just use the first one
216+ extraNames = packageName.find(',')
217+ if extraNames != -1:
218+ packageName = packageName[:extraNames]
219+
220+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
221+ currentItems = currentItems + 1
222+ print '-------- End ' + title.string + ' --------------\n'
223+
224+
225+
226
227=== added file 'security-and-updates/Hardy.py'
228--- security-and-updates/Hardy.py 1970-01-01 00:00:00 +0000
229+++ security-and-updates/Hardy.py 2010-06-15 00:39:26 +0000
230@@ -0,0 +1,90 @@
231+#!/usr/bin/env python
232+###
233+# Copyright (c) 2009 Nick Ali
234+#
235+# This program is free software; you can redistribute it and/or modify
236+# it under the terms of version 2 of the GNU General Public License as
237+# published by the Free Software Foundation.
238+#
239+# This program is distributed in the hope that it will be useful,
240+# but WITHOUT ANY WARRANTY; without even the implied warranty of
241+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
242+# GNU General Public License for more details.
243+#
244+###
245+
246+###
247+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
248+
249+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
250+###
251+
252+import urllib2
253+import sys
254+from BeautifulSoup import BeautifulSoup
255+
256+### Variables
257+#Mailing list URLs to grab data from. Add the urls here.
258+
259+###
260+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
261+###
262+
263+httpUrl = [
264+#'http://www.ubuntu.com/usn',
265+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
266+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
267+'https://lists.ubuntu.com/archives/hardy-changes/2010-June/thread.html',
268+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
269+#'https://lists.ubuntu.com/archives/jaunty-changes/2009-April/'
270+]
271+### You should not have to change anything under this line ###
272+
273+maxItems = 3000
274+if len(sys.argv) > 1:
275+ maxItems = int(sys.argv[1])
276+
277+
278+#grab the mailing page
279+for currentUrl in httpUrl:
280+ currentItems = 0
281+ mlPage = urllib2.urlopen(currentUrl)
282+ soup = BeautifulSoup(mlPage)
283+
284+ title = soup.find('title')
285+ print '-------- Start ' + title.string + ' --------------'
286+
287+ baseUrl = currentUrl[:currentUrl.rfind('/')]
288+ body = soup.html.body
289+
290+ for li in body.findAll('li'):
291+ if currentItems >= maxItems:
292+ break
293+ ahref = li.find('a')
294+ # Only deal with valid links
295+ if ahref != None:
296+ packageName = ''
297+ # Strip out the [ubuntu/....
298+ if ahref.string.startswith('[ubuntu'):
299+ closingBracket = ahref.string.find(']');
300+ packageName = ahref.string[closingBracket+2:].rstrip()
301+ elif ahref.string.startswith('USN'):
302+ packageName = ahref.string
303+
304+ if len(packageName) > 1:
305+ # Strip out the (Accepted)
306+ whereSource = packageName.find('(Accepted')
307+ if whereSource > 0 :
308+ packageName = packageName[:whereSource]
309+
310+ # If there are multiple packages, just use the first one
311+ extraNames = packageName.find(',')
312+ if extraNames != -1:
313+ packageName = packageName[:extraNames]
314+
315+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
316+ currentItems = currentItems + 1
317+ print '-------- End ' + title.string + ' --------------\n'
318+
319+
320+
321
322=== added file 'security-and-updates/Intrepid.py'
323--- security-and-updates/Intrepid.py 1970-01-01 00:00:00 +0000
324+++ security-and-updates/Intrepid.py 2010-06-15 00:39:26 +0000
325@@ -0,0 +1,90 @@
326+#!/usr/bin/env python
327+###
328+# Copyright (c) 2009 Nick Ali
329+#
330+# This program is free software; you can redistribute it and/or modify
331+# it under the terms of version 2 of the GNU General Public License as
332+# published by the Free Software Foundation.
333+#
334+# This program is distributed in the hope that it will be useful,
335+# but WITHOUT ANY WARRANTY; without even the implied warranty of
336+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
337+# GNU General Public License for more details.
338+#
339+###
340+
341+###
342+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
343+
344+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
345+###
346+
347+import urllib2
348+import sys
349+from BeautifulSoup import BeautifulSoup
350+
351+### Variables
352+#Mailing list URLs to grab data from. Add the urls here.
353+
354+###
355+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
356+###
357+
358+httpUrl = [
359+#'http://www.ubuntu.com/usn',
360+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
361+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
362+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
363+'https://lists.ubuntu.com/archives/intrepid-changes/2010-April/thread.html'
364+#'https://lists.ubuntu.com/archives/jaunty-changes/2009-April/'
365+]
366+### You should not have to change anything under this line ###
367+
368+maxItems = 3000
369+if len(sys.argv) > 1:
370+ maxItems = int(sys.argv[1])
371+
372+
373+#grab the mailing page
374+for currentUrl in httpUrl:
375+ currentItems = 0
376+ mlPage = urllib2.urlopen(currentUrl)
377+ soup = BeautifulSoup(mlPage)
378+
379+ title = soup.find('title')
380+ print '-------- Start ' + title.string + ' --------------'
381+
382+ baseUrl = currentUrl[:currentUrl.rfind('/')]
383+ body = soup.html.body
384+
385+ for li in body.findAll('li'):
386+ if currentItems >= maxItems:
387+ break
388+ ahref = li.find('a')
389+ # Only deal with valid links
390+ if ahref != None:
391+ packageName = ''
392+ # Strip out the [ubuntu/....
393+ if ahref.string.startswith('[ubuntu'):
394+ closingBracket = ahref.string.find(']');
395+ packageName = ahref.string[closingBracket+2:].rstrip()
396+ elif ahref.string.startswith('USN'):
397+ packageName = ahref.string
398+
399+ if len(packageName) > 1:
400+ # Strip out the (Accepted)
401+ whereSource = packageName.find('(Accepted')
402+ if whereSource > 0 :
403+ packageName = packageName[:whereSource]
404+
405+ # If there are multiple packages, just use the first one
406+ extraNames = packageName.find(',')
407+ if extraNames != -1:
408+ packageName = packageName[:extraNames]
409+
410+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
411+ currentItems = currentItems + 1
412+ print '-------- End ' + title.string + ' --------------\n'
413+
414+
415+
416
417=== added file 'security-and-updates/Jaunty.py'
418--- security-and-updates/Jaunty.py 1970-01-01 00:00:00 +0000
419+++ security-and-updates/Jaunty.py 2010-06-15 00:39:26 +0000
420@@ -0,0 +1,90 @@
421+#!/usr/bin/env python
422+###
423+# Copyright (c) 2009 Nick Ali
424+#
425+# This program is free software; you can redistribute it and/or modify
426+# it under the terms of version 2 of the GNU General Public License as
427+# published by the Free Software Foundation.
428+#
429+# This program is distributed in the hope that it will be useful,
430+# but WITHOUT ANY WARRANTY; without even the implied warranty of
431+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
432+# GNU General Public License for more details.
433+#
434+###
435+
436+###
437+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
438+
439+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
440+###
441+
442+import urllib2
443+import sys
444+from BeautifulSoup import BeautifulSoup
445+
446+### Variables
447+#Mailing list URLs to grab data from. Add the urls here.
448+
449+###
450+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
451+###
452+
453+httpUrl = [
454+#'http://www.ubuntu.com/usn',
455+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
456+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
457+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
458+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
459+'https://lists.ubuntu.com/archives/jaunty-changes/2010-June/thread.html'
460+]
461+### You should not have to change anything under this line ###
462+
463+maxItems = 3000
464+if len(sys.argv) > 1:
465+ maxItems = int(sys.argv[1])
466+
467+
468+#grab the mailing page
469+for currentUrl in httpUrl:
470+ currentItems = 0
471+ mlPage = urllib2.urlopen(currentUrl)
472+ soup = BeautifulSoup(mlPage)
473+
474+ title = soup.find('title')
475+ print '-------- Start ' + title.string + ' --------------'
476+
477+ baseUrl = currentUrl[:currentUrl.rfind('/')]
478+ body = soup.html.body
479+
480+ for li in body.findAll('li'):
481+ if currentItems >= maxItems:
482+ break
483+ ahref = li.find('a')
484+ # Only deal with valid links
485+ if ahref != None:
486+ packageName = ''
487+ # Strip out the [ubuntu/....
488+ if ahref.string.startswith('[ubuntu'):
489+ closingBracket = ahref.string.find(']');
490+ packageName = ahref.string[closingBracket+2:].rstrip()
491+ elif ahref.string.startswith('USN'):
492+ packageName = ahref.string
493+
494+ if len(packageName) > 1:
495+ # Strip out the (Accepted)
496+ whereSource = packageName.find('(Accepted')
497+ if whereSource > 0 :
498+ packageName = packageName[:whereSource]
499+
500+ # If there are multiple packages, just use the first one
501+ extraNames = packageName.find(',')
502+ if extraNames != -1:
503+ packageName = packageName[:extraNames]
504+
505+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
506+ currentItems = currentItems + 1
507+ print '-------- End ' + title.string + ' --------------\n'
508+
509+
510+
511
512=== added file 'security-and-updates/Karmic.py'
513--- security-and-updates/Karmic.py 1970-01-01 00:00:00 +0000
514+++ security-and-updates/Karmic.py 2010-06-15 00:39:26 +0000
515@@ -0,0 +1,90 @@
516+#!/usr/bin/env python
517+###
518+# Copyright (c) 2009 Nick Ali
519+#
520+# This program is free software; you can redistribute it and/or modify
521+# it under the terms of version 2 of the GNU General Public License as
522+# published by the Free Software Foundation.
523+#
524+# This program is distributed in the hope that it will be useful,
525+# but WITHOUT ANY WARRANTY; without even the implied warranty of
526+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
527+# GNU General Public License for more details.
528+#
529+###
530+
531+###
532+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
533+
534+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
535+###
536+
537+import urllib2
538+import sys
539+from BeautifulSoup import BeautifulSoup
540+
541+### Variables
542+#Mailing list URLs to grab data from. Add the urls here.
543+
544+###
545+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
546+###
547+
548+httpUrl = [
549+#'http://www.ubuntu.com/usn',
550+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
551+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
552+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
553+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
554+'https://lists.ubuntu.com/archives/karmic-changes/2010-June/thread.html'
555+]
556+### You should not have to change anything under this line ###
557+
558+maxItems = 3000
559+if len(sys.argv) > 1:
560+ maxItems = int(sys.argv[1])
561+
562+
563+#grab the mailing page
564+for currentUrl in httpUrl:
565+ currentItems = 0
566+ mlPage = urllib2.urlopen(currentUrl)
567+ soup = BeautifulSoup(mlPage)
568+
569+ title = soup.find('title')
570+ print '-------- Start ' + title.string + ' --------------'
571+
572+ baseUrl = currentUrl[:currentUrl.rfind('/')]
573+ body = soup.html.body
574+
575+ for li in body.findAll('li'):
576+ if currentItems >= maxItems:
577+ break
578+ ahref = li.find('a')
579+ # Only deal with valid links
580+ if ahref != None:
581+ packageName = ''
582+ # Strip out the [ubuntu/....
583+ if ahref.string.startswith('[ubuntu'):
584+ closingBracket = ahref.string.find(']');
585+ packageName = ahref.string[closingBracket+2:].rstrip()
586+ elif ahref.string.startswith('USN'):
587+ packageName = ahref.string
588+
589+ if len(packageName) > 1:
590+ # Strip out the (Accepted)
591+ whereSource = packageName.find('(Accepted')
592+ if whereSource > 0 :
593+ packageName = packageName[:whereSource]
594+
595+ # If there are multiple packages, just use the first one
596+ extraNames = packageName.find(',')
597+ if extraNames != -1:
598+ packageName = packageName[:extraNames]
599+
600+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
601+ currentItems = currentItems + 1
602+ print '-------- End ' + title.string + ' --------------\n'
603+
604+
605+
606
607=== added file 'security-and-updates/Lucid.py'
608--- security-and-updates/Lucid.py 1970-01-01 00:00:00 +0000
609+++ security-and-updates/Lucid.py 2010-06-15 00:39:26 +0000
610@@ -0,0 +1,91 @@
611+#!/usr/bin/env python
612+###
613+# Copyright (c) 2009 Nick Ali
614+#
615+# This program is free software; you can redistribute it and/or modify
616+# it under the terms of version 2 of the GNU General Public License as
617+# published by the Free Software Foundation.
618+#
619+# This program is distributed in the hope that it will be useful,
620+# but WITHOUT ANY WARRANTY; without even the implied warranty of
621+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
622+# GNU General Public License for more details.
623+#
624+###
625+
626+###
627+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
628+
629+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
630+###
631+
632+import urllib2
633+import sys
634+from BeautifulSoup import BeautifulSoup
635+
636+### Variables
637+#Mailing list URLs to grab data from. Add the urls here.
638+
639+###
640+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
641+###
642+
643+httpUrl = [
644+#'http://www.ubuntu.com/usn',
645+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
646+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
647+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
648+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
649+#'https://lists.ubuntu.com/archives/karmic-changes/2010-March/thread.html'
650+'https://lists.ubuntu.com/archives/lucid-changes/2010-June/thread.html'
651+]
652+### You should not have to change anything under this line ###
653+
654+maxItems = 3000
655+if len(sys.argv) > 1:
656+ maxItems = int(sys.argv[1])
657+
658+
659+#grab the mailing page
660+for currentUrl in httpUrl:
661+ currentItems = 0
662+ mlPage = urllib2.urlopen(currentUrl)
663+ soup = BeautifulSoup(mlPage)
664+
665+ title = soup.find('title')
666+ print '-------- Start ' + title.string + ' --------------'
667+
668+ baseUrl = currentUrl[:currentUrl.rfind('/')]
669+ body = soup.html.body
670+
671+ for li in body.findAll('li'):
672+ if currentItems >= maxItems:
673+ break
674+ ahref = li.find('a')
675+ # Only deal with valid links
676+ if ahref != None:
677+ packageName = ''
678+ # Strip out the [ubuntu/....
679+ if ahref.string.startswith('[ubuntu'):
680+ closingBracket = ahref.string.find(']');
681+ packageName = ahref.string[closingBracket+2:].rstrip()
682+ elif ahref.string.startswith('USN'):
683+ packageName = ahref.string
684+
685+ if len(packageName) > 1:
686+ # Strip out the (Accepted)
687+ whereSource = packageName.find('(Accepted')
688+ if whereSource > 0 :
689+ packageName = packageName[:whereSource]
690+
691+ # If there are multiple packages, just use the first one
692+ extraNames = packageName.find(',')
693+ if extraNames != -1:
694+ packageName = packageName[:extraNames]
695+
696+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
697+ currentItems = currentItems + 1
698+ print '-------- End ' + title.string + ' --------------\n'
699+
700+
701+
702
703=== added file 'security-and-updates/Security.py'
704--- security-and-updates/Security.py 1970-01-01 00:00:00 +0000
705+++ security-and-updates/Security.py 2010-06-15 00:39:26 +0000
706@@ -0,0 +1,110 @@
707+#!/usr/bin/env python
708+###
709+# Copyright (c) 2009 Nick Ali
710+#
711+# This program is free software; you can redistribute it and/or modify
712+# it under the terms of version 2 of the GNU General Public License as
713+# published by the Free Software Foundation.
714+#
715+# This program is distributed in the hope that it will be useful,
716+# but WITHOUT ANY WARRANTY; without even the implied warranty of
717+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
718+# GNU General Public License for more details.
719+#
720+###
721+
722+###
723+# To use this program, open up a terminal and cd into the directory that it's in. So if this is your desktop, "cd ./Desktop". Then "python mailinglists3.py". That will change if you rename this file. In order to run this you need the "urllib2" and "BeautifulSoup" modules for python.
724+
725+# New feature: enter a number at the end of the command to limit the number of entries listed. Example: "python mailinglists3.py 10" will list 10 entries from each link that you have it pulling info from.
726+###
727+
728+import urllib2
729+import sys
730+from BeautifulSoup import BeautifulSoup
731+import HTMLParser
732+
733+### Variables
734+#Mailing list URLs to grab data from. Add the urls here.
735+
736+###
737+# This can be set up to do any of the different update sections. Links for the USN won't need to be changed, but for each section it needs to be updated for each month. Due to limits in terminal scrollback, it's usually good to only do one at a time.
738+###
739+
740+httpUrl = [
741+#'http://www.ubuntu.com/usn',
742+#'https://lists.ubuntu.com/archives/dapper-changes/2009-April/thread.html',
743+#'https://lists.ubuntu.com/archives/gutsy-changes/2009-April/thread.html',
744+#'https://lists.ubuntu.com/archives/hardy-changes/2009-April/thread.html',
745+#'https://lists.ubuntu.com/archives/intrepid-changes/2009-April/thread.html'
746+#'https://lists.ubuntu.com/archives/jaunty-changes/2009-April/'
747+'https://lists.ubuntu.com/archives/ubuntu-security-announce/2010-May/thread.html'
748+]
749+### You should not have to change anything under this line ###
750+
751+maxItems = 3000
752+if len(sys.argv) > 1:
753+ maxItems = int(sys.argv[1])
754+
755+
756+#grab the mailing page
757+for currentUrl in httpUrl:
758+ currentItems = 0
759+ mlPage = urllib2.urlopen(currentUrl)
760+ mlPage = mlPage.read()
761+# if "XHTML" in mlPage:
762+# c = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">\n<html>\n"""
763+# mlPage = c + '\n'.join(mlPage.split('\n')[3:])
764+
765+ mlPage = mlPage.replace('text/css">\n', 'text/css><!--\n')
766+ mlPage = mlPage.replace('</style>', '--></style>')
767+
768+ try:
769+ soup = BeautifulSoup(mlPage)
770+ except HTMLParser.HTMLParseError, m:
771+ sys.stderr.write("Couldn't get page: %s\n" % m)
772+ print mlPage
773+ sys.exit()
774+ finally:
775+ fh = open('page', 'w')
776+ fh.write(mlPage)
777+ fh.close()
778+
779+ title = soup.find('title')
780+ print '-------- Start ' + title.string + ' --------------'
781+
782+ baseUrl = currentUrl[:currentUrl.rfind('/')]
783+ body = soup.html.body
784+
785+ for li in body.findAll('li'):
786+ if currentItems >= maxItems:
787+ break
788+ ahref = li.find('a')
789+
790+ # Only deal with valid links
791+ if ahref != None:
792+ packageName = ''
793+ # Strip out the [ubuntu/....
794+ if ahref.string.startswith('[ubuntu'):
795+ closingBracket = ahref.string.find(']');
796+ packageName = ahref.string[closingBracket+2:].rstrip()
797+ elif ahref.string.startswith('[USN'):
798+ packageName = ahref.string
799+
800+ if len(packageName) > 1:
801+ # Strip out the (Accepted)
802+ whereSource = packageName.find('(Accepted')
803+ if whereSource > 0 :
804+ packageName = packageName[:whereSource]
805+
806+ # If there are multiple packages, just use the first one
807+ extraNames = packageName.find(',')
808+ if extraNames != -1:
809+ packageName = packageName[:extraNames]
810+
811+ print ' * ' + packageName + '- ' + baseUrl + '/' + ahref['href']
812+ currentItems = currentItems + 1
813+ print '-------- End ' + title.string + ' --------------\n'
814+
815+
816+

Subscribers

People subscribed via source and target branches