Merge lp:~ubuntu-branches/ubuntu/precise/cobbler/precise-201110250011 into lp:ubuntu/precise/cobbler

Proposed by Ubuntu Package Importer
Status: Rejected
Rejected by: James Westby
Proposed branch: lp:~ubuntu-branches/ubuntu/precise/cobbler/precise-201110250011
Merge into: lp:ubuntu/precise/cobbler
Diff against target: 18412 lines (+146/-17324) (has conflicts)
70 files modified
.pc/05_cobbler_fix_reposync_permissions.patch/cobbler/action_reposync.py (+0/-568)
.pc/12_fix_dhcp_restart.patch/cobbler/modules/sync_post_restart_services.py (+0/-66)
.pc/21_cobbler_use_netboot.patch/cobbler/modules/manage_import_debian_ubuntu.py (+0/-777)
.pc/33_authn_configfile.patch/config/modules.conf (+0/-86)
.pc/34_fix_apache_wont_start.patch/config/cobbler_web.conf (+0/-14)
.pc/39_cw_remove_vhost.patch/config/cobbler_web.conf (+0/-14)
.pc/40_ubuntu_bind9_management.patch/cobbler/action_check.py (+0/-482)
.pc/40_ubuntu_bind9_management.patch/cobbler/modules/manage_bind.py (+0/-332)
.pc/40_ubuntu_bind9_management.patch/cobbler/modules/sync_post_restart_services.py (+0/-66)
.pc/40_ubuntu_bind9_management.patch/templates/etc/named.template (+0/-31)
.pc/41_update_tree_path_with_arch.patch/cobbler/modules/manage_import_debian_ubuntu.py (+0/-777)
.pc/42_fix_repomirror_create_sync.patch/cobbler/action_reposync.py (+0/-568)
.pc/42_fix_repomirror_create_sync.patch/cobbler/codes.py (+0/-98)
.pc/42_fix_repomirror_create_sync.patch/cobbler/modules/manage_import_debian_ubuntu.py (+0/-779)
.pc/43_fix_reposync_env_variable.patch/cobbler/action_reposync.py (+0/-572)
.pc/45_add_gpxe_support.patch/cobbler/action_check.py (+0/-482)
.pc/45_add_gpxe_support.patch/cobbler/action_litesync.py (+0/-167)
.pc/45_add_gpxe_support.patch/cobbler/modules/manage_in_tftpd.py (+0/-189)
.pc/45_add_gpxe_support.patch/cobbler/modules/manage_isc.py (+0/-201)
.pc/45_add_gpxe_support.patch/cobbler/pxegen.py (+0/-837)
.pc/45_add_gpxe_support.patch/config/settings (+0/-370)
.pc/46_valid_hostname_for_dns.patch/cobbler/modules/manage_isc.py (+0/-201)
.pc/47_ubuntu_add_oneiric_codename.patch/cobbler/codes.py (+0/-98)
.pc/49_ubuntu_add_arm_arch_support.patch/cobbler/action_import.py (+0/-1332)
.pc/49_ubuntu_add_arm_arch_support.patch/cobbler/item_distro.py (+0/-245)
.pc/49_ubuntu_add_arm_arch_support.patch/cobbler/item_image.py (+0/-197)
.pc/49_ubuntu_add_arm_arch_support.patch/cobbler/item_repo.py (+0/-198)
.pc/49_ubuntu_add_arm_arch_support.patch/cobbler/utils.py (+0/-2074)
.pc/49_ubuntu_add_arm_arch_support.patch/web/cobbler_web/templates/import.tmpl (+0/-46)
.pc/50_fix_cobbler_timezone.patch/web/settings.py (+0/-69)
.pc/51_koan_grub2_instead_of_grubby.patch/koan/app.py (+0/-1689)
.pc/52_ubuntu_default_config.patch/config/settings (+0/-374)
.pc/53_sample_preseed_kopts_postinst.patch/kickstarts/sample.seed (+0/-96)
.pc/54_koan_fix_tree_when_ksmeta.patch/koan/app.py (+0/-1741)
.pc/55_ubuntu_branding.patch/web/cobbler_web/templates/login.tmpl (+0/-28)
.pc/55_ubuntu_branding.patch/web/cobbler_web/templates/master.tmpl (+0/-65)
.pc/55_ubuntu_branding.patch/web/content/style.css (+0/-156)
.pc/56_ubuntu_arm_generate_pxe_files.patch/cobbler/pxegen.py (+0/-868)
.pc/57_ubuntu_dnsmasq_domain.patch/templates/etc/dnsmasq.template (+0/-20)
.pc/applied-patches (+0/-23)
cobbler/action_check.py (+1/-4)
cobbler/action_import.py (+2/-2)
cobbler/action_litesync.py (+1/-4)
cobbler/action_reposync.py (+2/-11)
cobbler/codes.py (+3/-3)
cobbler/item_distro.py (+1/-1)
cobbler/item_image.py (+1/-1)
cobbler/item_repo.py (+1/-1)
cobbler/modules/manage_bind.py (+3/-3)
cobbler/modules/manage_import_debian_ubuntu.py (+15/-68)
cobbler/modules/manage_in_tftpd.py (+3/-3)
cobbler/modules/manage_isc.py (+2/-2)
cobbler/modules/sync_post_restart_services.py (+2/-2)
cobbler/pxegen.py (+10/-49)
cobbler/utils.py (+2/-2)
config/cobbler_web.conf (+5/-1)
config/modules.conf (+1/-1)
config/settings (+5/-12)
kickstarts/sample.seed (+0/-2)
koan/app.py (+53/-107)
templates/etc/dnsmasq.template (+0/-1)
templates/etc/named.template (+19/-2)
templates/pxe/gpxemenu.template (+0/-2)
templates/pxe/pxeprofile_arm.template (+0/-5)
templates/pxe/pxesystem_arm.template (+0/-7)
web/cobbler_web/templates/import.tmpl (+0/-1)
web/cobbler_web/templates/login.tmpl (+1/-2)
web/cobbler_web/templates/master.tmpl (+0/-1)
web/content/style.css (+12/-27)
web/settings.py (+1/-1)
Conflict: can't delete .pc/47_ubuntu_add_oneiric_codename.patch because it is not empty.  Not deleting.
Conflict because .pc/47_ubuntu_add_oneiric_codename.patch is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/47_ubuntu_add_oneiric_codename.patch/cobbler because it is not empty.  Not deleting.
Conflict because .pc/47_ubuntu_add_oneiric_codename.patch/cobbler is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/47_ubuntu_add_oneiric_codename.patch/cobbler/modules because it is not empty.  Not deleting.
Conflict because .pc/47_ubuntu_add_oneiric_codename.patch/cobbler/modules is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/47_ubuntu_add_oneiric_codename.patch/cobbler/modules/manage_import_debian_ubuntu.py
Conflict: can't delete .pc/48_ubuntu_mini_iso_autodetect.patch because it is not empty.  Not deleting.
Conflict because .pc/48_ubuntu_mini_iso_autodetect.patch is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/48_ubuntu_mini_iso_autodetect.patch/cobbler because it is not empty.  Not deleting.
Conflict because .pc/48_ubuntu_mini_iso_autodetect.patch/cobbler is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/48_ubuntu_mini_iso_autodetect.patch/cobbler/modules because it is not empty.  Not deleting.
Conflict because .pc/48_ubuntu_mini_iso_autodetect.patch/cobbler/modules is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/48_ubuntu_mini_iso_autodetect.patch/cobbler/modules/manage_import_debian_ubuntu.py
Conflict: can't delete .pc/49_ubuntu_add_arm_arch_support.patch because it is not empty.  Not deleting.
Conflict because .pc/49_ubuntu_add_arm_arch_support.patch is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/49_ubuntu_add_arm_arch_support.patch/cobbler because it is not empty.  Not deleting.
Conflict because .pc/49_ubuntu_add_arm_arch_support.patch/cobbler is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/49_ubuntu_add_arm_arch_support.patch/cobbler/modules because it is not empty.  Not deleting.
Conflict because .pc/49_ubuntu_add_arm_arch_support.patch/cobbler/modules is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/49_ubuntu_add_arm_arch_support.patch/cobbler/modules/manage_import_debian_ubuntu.py
Text conflict in cobbler/modules/manage_import_debian_ubuntu.py
To merge this branch: bzr merge lp:~ubuntu-branches/ubuntu/precise/cobbler/precise-201110250011
Reviewer: Ubuntu branches
Review status: Pending
Review via email: mp+80279@code.launchpad.net

Description of the change

The package importer has detected a possible inconsistency between the package history in the archive and the history in bzr. As the archive is authoritative, the importer has made lp:ubuntu/precise/cobbler reflect what is in the archive, and the old bzr branch has been pushed to lp:~ubuntu-branches/ubuntu/precise/cobbler/precise-201110250011. This merge proposal was created so that an Ubuntu developer can review the situation and perform a merge/upload if necessary. There are three typical cases where this can happen.
  1. Someone pushes a change to bzr and someone else uploads the package without that change. This is the reason the importer performs this check. If this appears to be the case, a merge/upload should be done if the changes that were in bzr are still desirable.
  2. The importer incorrectly detected the above situation when someone made a change in bzr and then uploaded it.
  3. The importer incorrectly detected the above situation when someone just uploaded a package and didn't touch bzr.

If this does not appear to be the first situation, set the status of the merge proposal to "Rejected" and help avoid the problem in future by filing a bug at https://bugs.launchpad.net/udd that links to this merge proposal.

(this is an automatically generated message)
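
For the first case, the review amounts to merging the importer's branch back into the packaging branch, resolving the conflicts listed above, and then building and uploading. A minimal sketch of that workflow, assuming the usual bzr-based packaging setup (only the merge command comes from this proposal; the conflict-resolution and upload steps are assumptions and will vary by workflow):

  bzr branch lp:ubuntu/precise/cobbler cobbler-precise
  cd cobbler-precise
  bzr merge lp:~ubuntu-branches/ubuntu/precise/cobbler/precise-201110250011
  # inspect the text and .pc/ directory conflicts reported above and fix them by hand,
  # then mark everything resolved
  bzr resolve
  bzr commit -m "Merge changes that were only present in the bzr branch"
  # build, test and upload as normal, then push the result back to lp:ubuntu/precise/cobbler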


Unmerged revisions

51. By Andres Rodriguez

* debian/patches:
  - 42_fix_repomirror_create_sync.patch: Updated to correctly create the
    mirror. (LP: #872926)
* debian/cobbler-web.postrm: Remove symlinks that were created on
  postinst. (LP: #872892)

50. By Andres Rodriguez

* debian/cobbler-ubuntu-import:
    - Check update pockets for releases during download and update checks.
      (LP: #850880)
    - Allow '-u' to upgrade existing profiles to a newer version of an ISO.
      (LP: #850886)
    - '-v' to enable debug messages.
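
As a rough illustration of how the new options fit together (the invocation below is an assumption for illustration only; only the '-u' and '-v' flags come from this changelog entry, and the release argument is a hypothetical example):

  # check for a newer ISO (including the updates pocket), print debug output,
  # and upgrade the existing profile if a newer image is found
  cobbler-ubuntu-import -v -u oneiric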

Preview Diff

1=== removed directory '.pc/05_cobbler_fix_reposync_permissions.patch'
2=== removed directory '.pc/05_cobbler_fix_reposync_permissions.patch/cobbler'
3=== removed file '.pc/05_cobbler_fix_reposync_permissions.patch/cobbler/action_reposync.py'
4--- .pc/05_cobbler_fix_reposync_permissions.patch/cobbler/action_reposync.py 2011-01-28 14:39:12 +0000
5+++ .pc/05_cobbler_fix_reposync_permissions.patch/cobbler/action_reposync.py 1970-01-01 00:00:00 +0000
6@@ -1,568 +0,0 @@
7-"""
8-Builds out and synchronizes yum repo mirrors.
9-Initial support for rsync, perhaps reposync coming later.
10-
11-Copyright 2006-2007, Red Hat, Inc
12-Michael DeHaan <mdehaan@redhat.com>
13-
14-This program is free software; you can redistribute it and/or modify
15-it under the terms of the GNU General Public License as published by
16-the Free Software Foundation; either version 2 of the License, or
17-(at your option) any later version.
18-
19-This program is distributed in the hope that it will be useful,
20-but WITHOUT ANY WARRANTY; without even the implied warranty of
21-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22-GNU General Public License for more details.
23-
24-You should have received a copy of the GNU General Public License
25-along with this program; if not, write to the Free Software
26-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27-02110-1301 USA
28-"""
29-
30-import os
31-import os.path
32-import time
33-import yaml # Howell-Clark version
34-import sys
35-HAS_YUM = True
36-try:
37- import yum
38-except:
39- HAS_YUM = False
40-
41-import utils
42-from cexceptions import *
43-import traceback
44-import errno
45-from utils import _
46-import clogger
47-
48-class RepoSync:
49- """
50- Handles conversion of internal state to the tftpboot tree layout
51- """
52-
53- # ==================================================================================
54-
55- def __init__(self,config,tries=1,nofail=False,logger=None):
56- """
57- Constructor
58- """
59- self.verbose = True
60- self.api = config.api
61- self.config = config
62- self.distros = config.distros()
63- self.profiles = config.profiles()
64- self.systems = config.systems()
65- self.settings = config.settings()
66- self.repos = config.repos()
67- self.rflags = self.settings.reposync_flags
68- self.tries = tries
69- self.nofail = nofail
70- self.logger = logger
71-
72- if logger is None:
73- self.logger = clogger.Logger()
74-
75- self.logger.info("hello, reposync")
76-
77-
78- # ===================================================================
79-
80- def run(self, name=None, verbose=True):
81- """
82- Syncs the current repo configuration file with the filesystem.
83- """
84-
85- self.logger.info("run, reposync, run!")
86-
87- try:
88- self.tries = int(self.tries)
89- except:
90- utils.die(self.logger,"retry value must be an integer")
91-
92- self.verbose = verbose
93-
94- report_failure = False
95- for repo in self.repos:
96-
97- env = repo.environment
98-
99- for k in env.keys():
100- self.logger.info("environment: %s=%s" % (k,env[k]))
101- if env[k] is not None:
102- os.putenv(k,env[k])
103-
104- if name is not None and repo.name != name:
105- # invoked to sync only a specific repo, this is not the one
106- continue
107- elif name is None and not repo.keep_updated:
108- # invoked to run against all repos, but this one is off
109- self.logger.info("%s is set to not be updated" % repo.name)
110- continue
111-
112- repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
113- repo_path = os.path.join(repo_mirror, repo.name)
114- mirror = repo.mirror
115-
116- if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
117- os.makedirs(repo_path)
118-
119- # which may actually NOT reposync if the repo is set to not mirror locally
120- # but that's a technicality
121-
122- for x in range(self.tries+1,1,-1):
123- success = False
124- try:
125- self.sync(repo)
126- success = True
127- except:
128- utils.log_exc(self.logger)
129- self.logger.warning("reposync failed, tries left: %s" % (x-2))
130-
131- if not success:
132- report_failure = True
133- if not self.nofail:
134- utils.die(self.logger,"reposync failed, retry limit reached, aborting")
135- else:
136- self.logger.error("reposync failed, retry limit reached, skipping")
137-
138- self.update_permissions(repo_path)
139-
140- if report_failure:
141- utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
142-
143- return True
144-
145- # ==================================================================================
146-
147- def sync(self, repo):
148-
149- """
150- Conditionally sync a repo, based on type.
151- """
152-
153- if repo.breed == "rhn":
154- return self.rhn_sync(repo)
155- elif repo.breed == "yum":
156- return self.yum_sync(repo)
157- elif repo.breed == "apt":
158- return self.apt_sync(repo)
159- elif repo.breed == "rsync":
160- return self.rsync_sync(repo)
161- else:
162- utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
163-
164- # ====================================================================================
165-
166- def createrepo_walker(self, repo, dirname, fnames):
167- """
168- Used to run createrepo on a copied Yum mirror.
169- """
170- if os.path.exists(dirname) or repo['breed'] == 'rsync':
171- utils.remove_yum_olddata(dirname)
172-
173- # add any repo metadata we can use
174- mdoptions = []
175- if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
176- if not HAS_YUM:
177- utils.die(self.logger,"yum is required to use this feature")
178-
179- rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
180- if rmd.repoData.has_key("group"):
181- groupmdfile = rmd.getData("group").location[1]
182- mdoptions.append("-g %s" % groupmdfile)
183- if rmd.repoData.has_key("prestodelta"):
184- # need createrepo >= 0.9.7 to add deltas
185- if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
186- cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
187- createrepo_ver = utils.subprocess_get(self.logger, cmd)
188- if createrepo_ver >= "0.9.7":
189- mdoptions.append("--deltas")
190- else:
191- self.logger.error("this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.")
192-
193- blended = utils.blender(self.api, False, repo)
194- flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
195- try:
196- # BOOKMARK
197- cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
198- utils.subprocess_call(self.logger, cmd)
199- except:
200- utils.log_exc(self.logger)
201- self.logger.error("createrepo failed.")
202- del fnames[:] # we're in the right place
203-
204- # ====================================================================================
205-
206- def rsync_sync(self, repo):
207-
208- """
209- Handle copying of rsync:// and rsync-over-ssh repos.
210- """
211-
212- repo_mirror = repo.mirror
213-
214- if not repo.mirror_locally:
215- utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
216-
217- if repo.rpm_list != "" and repo.rpm_list != []:
218- self.logger.warning("--rpm-list is not supported for rsync'd repositories")
219-
220- # FIXME: don't hardcode
221- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
222-
223- spacer = ""
224- if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
225- spacer = "-e ssh"
226- if not repo.mirror.endswith("/"):
227- repo.mirror = "%s/" % repo.mirror
228-
229- # FIXME: wrapper for subprocess that logs to logger
230- cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
231- rc = utils.subprocess_call(self.logger, cmd)
232-
233- if rc !=0:
234- utils.die(self.logger,"cobbler reposync failed")
235- os.path.walk(dest_path, self.createrepo_walker, repo)
236- self.create_local_file(dest_path, repo)
237-
238- # ====================================================================================
239-
240- def rhn_sync(self, repo):
241-
242- """
243- Handle mirroring of RHN repos.
244- """
245-
246- repo_mirror = repo.mirror
247-
248- # FIXME? warn about not having yum-utils. We don't want to require it in the package because
249- # RHEL4 and RHEL5U0 don't have it.
250-
251- if not os.path.exists("/usr/bin/reposync"):
252- utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
253-
254- cmd = "" # command to run
255- has_rpm_list = False # flag indicating not to pull the whole repo
256-
257- # detect cases that require special handling
258-
259- if repo.rpm_list != "" and repo.rpm_list != []:
260- has_rpm_list = True
261-
262- # create yum config file for use by reposync
263- # FIXME: don't hardcode
264- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
265- temp_path = os.path.join(dest_path, ".origin")
266-
267- if not os.path.isdir(temp_path):
268- # FIXME: there's a chance this might break the RHN D/L case
269- os.makedirs(temp_path)
270-
271- # how we invoke yum-utils depends on whether this is RHN content or not.
272-
273-
274- # this is the somewhat more-complex RHN case.
275- # NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
276- if not repo.mirror_locally:
277- utils.die("rhn:// repos do not work with --mirror-locally=1")
278-
279- if has_rpm_list:
280- self.logger.warning("warning: --rpm-list is not supported for RHN content")
281- rest = repo.mirror[6:] # everything after rhn://
282- cmd = "/usr/bin/reposync %s -r %s --download_path=%s" % (self.rflags, rest, "/var/www/cobbler/repo_mirror")
283- if repo.name != rest:
284- args = { "name" : repo.name, "rest" : rest }
285- utils.die(self.logger,"ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel" % args)
286-
287- if repo.arch == "i386":
288- # counter-intuitive, but we want the newish kernels too
289- repo.arch = "i686"
290-
291- if repo.arch != "":
292- cmd = "%s -a %s" % (cmd, repo.arch)
293-
294- # now regardless of whether we're doing yumdownloader or reposync
295- # or whether the repo was http://, ftp://, or rhn://, execute all queued
296- # commands here. Any failure at any point stops the operation.
297-
298- if repo.mirror_locally:
299- rc = utils.subprocess_call(self.logger, cmd)
300- # Don't die if reposync fails, it is logged
301- # if rc !=0:
302- # utils.die(self.logger,"cobbler reposync failed")
303-
304- # some more special case handling for RHN.
305- # create the config file now, because the directory didn't exist earlier
306-
307- temp_file = self.create_local_file(temp_path, repo, output=False)
308-
309- # now run createrepo to rebuild the index
310-
311- if repo.mirror_locally:
312- os.path.walk(dest_path, self.createrepo_walker, repo)
313-
314- # create the config file the hosts will use to access the repository.
315-
316- self.create_local_file(dest_path, repo)
317-
318- # ====================================================================================
319-
320- def yum_sync(self, repo):
321-
322- """
323- Handle copying of http:// and ftp:// yum repos.
324- """
325-
326- repo_mirror = repo.mirror
327-
328- # warn about not having yum-utils. We don't want to require it in the package because
329- # RHEL4 and RHEL5U0 don't have it.
330-
331- if not os.path.exists("/usr/bin/reposync"):
332- utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
333-
334- cmd = "" # command to run
335- has_rpm_list = False # flag indicating not to pull the whole repo
336-
337- # detect cases that require special handling
338-
339- if repo.rpm_list != "" and repo.rpm_list != []:
340- has_rpm_list = True
341-
342- # create yum config file for use by reposync
343- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
344- temp_path = os.path.join(dest_path, ".origin")
345-
346- if not os.path.isdir(temp_path) and repo.mirror_locally:
347- # FIXME: there's a chance this might break the RHN D/L case
348- os.makedirs(temp_path)
349-
350- # create the config file that yum will use for the copying
351-
352- if repo.mirror_locally:
353- temp_file = self.create_local_file(temp_path, repo, output=False)
354-
355- if not has_rpm_list and repo.mirror_locally:
356- # if we have not requested only certain RPMs, use reposync
357- cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, "/var/www/cobbler/repo_mirror")
358- if repo.arch != "":
359- if repo.arch == "x86":
360- repo.arch = "i386" # FIX potential arch errors
361- if repo.arch == "i386":
362- # counter-intuitive, but we want the newish kernels too
363- cmd = "%s -a i686" % (cmd)
364- else:
365- cmd = "%s -a %s" % (cmd, repo.arch)
366-
367- elif repo.mirror_locally:
368-
369- # create the output directory if it doesn't exist
370- if not os.path.exists(dest_path):
371- os.makedirs(dest_path)
372-
373- use_source = ""
374- if repo.arch == "src":
375- use_source = "--source"
376-
377- # older yumdownloader sometimes explodes on --resolvedeps
378- # if this happens to you, upgrade yum & yum-utils
379- extra_flags = self.settings.yumdownloader_flags
380- cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))
381-
382- # now regardless of whether we're doing yumdownloader or reposync
383- # or whether the repo was http://, ftp://, or rhn://, execute all queued
384- # commands here. Any failure at any point stops the operation.
385-
386- if repo.mirror_locally:
387- rc = utils.subprocess_call(self.logger, cmd)
388- if rc !=0:
389- utils.die(self.logger,"cobbler reposync failed")
390-
391- repodata_path = os.path.join(dest_path, "repodata")
392-
393- if not os.path.exists("/usr/bin/wget"):
394- utils.die(self.logger,"no /usr/bin/wget found, please install wget")
395-
396- # grab repomd.xml and use it to download any metadata we can use
397- cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
398- rc = utils.subprocess_call(self.logger,cmd2)
399- if rc == 0:
400- # create our repodata directory now, as any extra metadata we're
401- # about to download probably lives there
402- if not os.path.isdir(repodata_path):
403- os.makedirs(repodata_path)
404- rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
405- for mdtype in rmd.repoData.keys():
406- # don't download metadata files that are created by default
407- if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
408- mdfile = rmd.getData(mdtype).location[1]
409- cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
410- utils.subprocess_call(self.logger,cmd3)
411- if rc !=0:
412- utils.die(self.logger,"wget failed")
413-
414- # now run createrepo to rebuild the index
415-
416- if repo.mirror_locally:
417- os.path.walk(dest_path, self.createrepo_walker, repo)
418-
419- # create the config file the hosts will use to access the repository.
420-
421- self.create_local_file(dest_path, repo)
422-
423- # ====================================================================================
424-
425-
426- def apt_sync(self, repo):
427-
428- """
429- Handle copying of http:// and ftp:// debian repos.
430- """
431-
432- repo_mirror = repo.mirror
433-
434- # warn about not having mirror program.
435-
436- mirror_program = "/usr/bin/debmirror"
437- if not os.path.exists(mirror_program):
438- utils.die(self.logger,"no %s found, please install it"%(mirror_program))
439-
440- cmd = "" # command to run
441- has_rpm_list = False # flag indicating not to pull the whole repo
442-
443- # detect cases that require special handling
444-
445- if repo.rpm_list != "" and repo.rpm_list != []:
446- utils.die(self.logger,"has_rpm_list not yet supported on apt repos")
447-
448- if not repo.arch:
449- utils.die(self.logger,"Architecture is required for apt repositories")
450-
451- # built destination path for the repo
452- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
453-
454- if repo.mirror_locally:
455- mirror = repo.mirror.replace("@@suite@@",repo.os_version)
456-
457- idx = mirror.find("://")
458- method = mirror[:idx]
459- mirror = mirror[idx+3:]
460-
461- idx = mirror.find("/")
462- host = mirror[:idx]
463- mirror = mirror[idx+1:]
464-
465- idx = mirror.rfind("/dists/")
466- suite = mirror[idx+7:]
467- mirror = mirror[:idx]
468-
469- mirror_data = "--method=%s --host=%s --root=%s --dist=%s " % ( method , host , mirror , suite )
470-
471- # FIXME : flags should come from repo instead of being hardcoded
472-
473- rflags = "--passive --nocleanup"
474- for x in repo.yumopts:
475- if repo.yumopts[x]:
476- rflags += " %s %s" % ( x , repo.yumopts[x] )
477- else:
478- rflags += " %s" % x
479- cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data, dest_path)
480- if repo.arch == "src":
481- cmd = "%s --source" % cmd
482- else:
483- arch = repo.arch
484- if arch == "x86":
485- arch = "i386" # FIX potential arch errors
486- if arch == "x86_64":
487- arch = "amd64" # FIX potential arch errors
488- cmd = "%s --nosource -a %s" % (cmd, arch)
489-
490- rc = utils.subprocess_call(self.logger, cmd)
491- if rc !=0:
492- utils.die(self.logger,"cobbler reposync failed")
493-
494-
495- def create_local_file(self, dest_path, repo, output=True):
496- """
497-
498- Creates Yum config files for use by reposync
499-
500- Two uses:
501- (A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
502- (B) output=False, Create a temporary file for yum to feed into yum for mirroring
503- """
504-
505- # the output case will generate repo configuration files which are usable
506- # for the installed systems. They need to be made compatible with --server-override
507- # which means they are actually templates, which need to be rendered by a cobbler-sync
508- # on per profile/system basis.
509-
510- if output:
511- fname = os.path.join(dest_path,"config.repo")
512- else:
513- fname = os.path.join(dest_path, "%s.repo" % repo.name)
514- self.logger.debug("creating: %s" % fname)
515- if not os.path.exists(dest_path):
516- utils.mkdir(dest_path)
517- config_file = open(fname, "w+")
518- config_file.write("[%s]\n" % repo.name)
519- config_file.write("name=%s\n" % repo.name)
520- optenabled = False
521- optgpgcheck = False
522- if output:
523- if repo.mirror_locally:
524- line = "baseurl=http://${server}/cobbler/repo_mirror/%s\n" % (repo.name)
525- else:
526- mstr = repo.mirror
527- if mstr.startswith("/"):
528- mstr = "file://%s" % mstr
529- line = "baseurl=%s\n" % mstr
530-
531- config_file.write(line)
532- # user may have options specific to certain yum plugins
533- # add them to the file
534- for x in repo.yumopts:
535- config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
536- if x == "enabled":
537- optenabled = True
538- if x == "gpgcheck":
539- optgpgcheck = True
540- else:
541- mstr = repo.mirror
542- if mstr.startswith("/"):
543- mstr = "file://%s" % mstr
544- line = "baseurl=%s\n" % mstr
545- if self.settings.http_port not in (80, '80'):
546- http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
547- else:
548- http_server = self.settings.server
549- line = line.replace("@@server@@",http_server)
550- config_file.write(line)
551- if not optenabled:
552- config_file.write("enabled=1\n")
553- config_file.write("priority=%s\n" % repo.priority)
554- # FIXME: potentially might want a way to turn this on/off on a per-repo basis
555- if not optgpgcheck:
556- config_file.write("gpgcheck=0\n")
557- config_file.close()
558- return fname
559-
560- # ==================================================================================
561-
562- def update_permissions(self, repo_path):
563- """
564- Verifies that permissions and contexts after an rsync are as expected.
565- Sending proper rsync flags should prevent the need for this, though this is largely
566- a safeguard.
567- """
568- # all_path = os.path.join(repo_path, "*")
569- cmd1 = "chown -R root:apache %s" % repo_path
570- utils.subprocess_call(self.logger, cmd1)
571-
572- cmd2 = "chmod -R 755 %s" % repo_path
573- utils.subprocess_call(self.logger, cmd2)
574-
575
576=== removed directory '.pc/12_fix_dhcp_restart.patch'
577=== removed directory '.pc/12_fix_dhcp_restart.patch/cobbler'
578=== removed directory '.pc/12_fix_dhcp_restart.patch/cobbler/modules'
579=== removed file '.pc/12_fix_dhcp_restart.patch/cobbler/modules/sync_post_restart_services.py'
580--- .pc/12_fix_dhcp_restart.patch/cobbler/modules/sync_post_restart_services.py 2011-01-28 14:39:12 +0000
581+++ .pc/12_fix_dhcp_restart.patch/cobbler/modules/sync_post_restart_services.py 1970-01-01 00:00:00 +0000
582@@ -1,66 +0,0 @@
583-import distutils.sysconfig
584-import sys
585-import os
586-import traceback
587-import cexceptions
588-import os
589-import sys
590-import xmlrpclib
591-import cobbler.module_loader as module_loader
592-import cobbler.utils as utils
593-
594-plib = distutils.sysconfig.get_python_lib()
595-mod_path="%s/cobbler" % plib
596-sys.path.insert(0, mod_path)
597-
598-def register():
599- # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.
600- # the return of this method indicates the trigger type
601- return "/var/lib/cobbler/triggers/sync/post/*"
602-
603-def run(api,args,logger):
604-
605- settings = api.settings()
606-
607- manage_dhcp = str(settings.manage_dhcp).lower()
608- manage_dns = str(settings.manage_dns).lower()
609- manage_tftpd = str(settings.manage_tftpd).lower()
610- restart_dhcp = str(settings.restart_dhcp).lower()
611- restart_dns = str(settings.restart_dns).lower()
612-
613- which_dhcp_module = module_loader.get_module_from_file("dhcp","module",just_name=True).strip()
614- which_dns_module = module_loader.get_module_from_file("dns","module",just_name=True).strip()
615-
616- # special handling as we don't want to restart it twice
617- has_restarted_dnsmasq = False
618-
619- rc = 0
620- if manage_dhcp != "0":
621- if which_dhcp_module == "manage_isc":
622- if restart_dhcp != "0":
623- rc = utils.subprocess_call(logger, "dhcpd -t -q", shell=True)
624- if rc != 0:
625- logger.error("dhcpd -t failed")
626- return 1
627- rc = utils.subprocess_call(logger,"service dhcpd restart", shell=True)
628- elif which_dhcp_module == "manage_dnsmasq":
629- if restart_dhcp != "0":
630- rc = utils.subprocess_call(logger, "service dnsmasq restart")
631- has_restarted_dnsmasq = True
632- else:
633- logger.error("unknown DHCP engine: %s" % which_dhcp_module)
634- rc = 411
635-
636- if manage_dns != "0" and restart_dns != "0":
637- if which_dns_module == "manage_bind":
638- rc = utils.subprocess_call(logger, "service named restart", shell=True)
639- elif which_dns_module == "manage_dnsmasq" and not has_restarted_dnsmasq:
640- rc = utils.subprocess_call(logger, "service dnsmasq restart", shell=True)
641- elif which_dns_module == "manage_dnsmasq" and has_restarted_dnsmasq:
642- rc = 0
643- else:
644- logger.error("unknown DNS engine: %s" % which_dns_module)
645- rc = 412
646-
647- return rc
648-
649
650=== removed directory '.pc/21_cobbler_use_netboot.patch'
651=== removed directory '.pc/21_cobbler_use_netboot.patch/cobbler'
652=== removed directory '.pc/21_cobbler_use_netboot.patch/cobbler/modules'
653=== removed file '.pc/21_cobbler_use_netboot.patch/cobbler/modules/manage_import_debian_ubuntu.py'
654--- .pc/21_cobbler_use_netboot.patch/cobbler/modules/manage_import_debian_ubuntu.py 2011-01-18 12:03:14 +0000
655+++ .pc/21_cobbler_use_netboot.patch/cobbler/modules/manage_import_debian_ubuntu.py 1970-01-01 00:00:00 +0000
656@@ -1,777 +0,0 @@
657-"""
658-This is some of the code behind 'cobbler sync'.
659-
660-Copyright 2006-2009, Red Hat, Inc
661-Michael DeHaan <mdehaan@redhat.com>
662-John Eckersberg <jeckersb@redhat.com>
663-
664-This program is free software; you can redistribute it and/or modify
665-it under the terms of the GNU General Public License as published by
666-the Free Software Foundation; either version 2 of the License, or
667-(at your option) any later version.
668-
669-This program is distributed in the hope that it will be useful,
670-but WITHOUT ANY WARRANTY; without even the implied warranty of
671-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
672-GNU General Public License for more details.
673-
674-You should have received a copy of the GNU General Public License
675-along with this program; if not, write to the Free Software
676-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
677-02110-1301 USA
678-"""
679-
680-import os
681-import os.path
682-import shutil
683-import time
684-import sys
685-import glob
686-import traceback
687-import errno
688-import re
689-from utils import popen2
690-from shlex import shlex
691-
692-
693-import utils
694-from cexceptions import *
695-import templar
696-
697-import item_distro
698-import item_profile
699-import item_repo
700-import item_system
701-
702-from utils import _
703-
704-def register():
705- """
706- The mandatory cobbler module registration hook.
707- """
708- return "manage/import"
709-
710-
711-class ImportDebianUbuntuManager:
712-
713- def __init__(self,config,logger):
714- """
715- Constructor
716- """
717- self.logger = logger
718- self.config = config
719- self.api = config.api
720- self.distros = config.distros()
721- self.profiles = config.profiles()
722- self.systems = config.systems()
723- self.settings = config.settings()
724- self.repos = config.repos()
725- self.templar = templar.Templar(config)
726-
727- # required function for import modules
728- def what(self):
729- return "import/debian_ubuntu"
730-
731- # required function for import modules
732- def check_for_signature(self,path,cli_breed):
733- signatures = [
734- 'pool',
735- ]
736-
737- #self.logger.info("scanning %s for a debian/ubuntu distro signature" % path)
738- for signature in signatures:
739- d = os.path.join(path,signature)
740- if os.path.exists(d):
741- self.logger.info("Found a debian/ubuntu compatible signature: %s" % signature)
742- return (True,signature)
743-
744- if cli_breed and cli_breed in self.get_valid_breeds():
745- self.logger.info("Warning: No distro signature for kernel at %s, using value from command line" % path)
746- return (True,None)
747-
748- return (False,None)
749-
750- # required function for import modules
751- def run(self,pkgdir,mirror,mirror_name,network_root=None,kickstart_file=None,rsync_flags=None,arch=None,breed=None,os_version=None):
752- self.pkgdir = pkgdir
753- self.mirror = mirror
754- self.mirror_name = mirror_name
755- self.network_root = network_root
756- self.kickstart_file = kickstart_file
757- self.rsync_flags = rsync_flags
758- self.arch = arch
759- self.breed = breed
760- self.os_version = os_version
761-
762- # some fixups for the XMLRPC interface, which does not use "None"
763- if self.arch == "": self.arch = None
764- if self.mirror == "": self.mirror = None
765- if self.mirror_name == "": self.mirror_name = None
766- if self.kickstart_file == "": self.kickstart_file = None
767- if self.os_version == "": self.os_version = None
768- if self.rsync_flags == "": self.rsync_flags = None
769- if self.network_root == "": self.network_root = None
770-
771- # If no breed was specified on the command line, figure it out
772- if self.breed == None:
773- self.breed = self.get_breed_from_directory()
774- if not self.breed:
775- utils.die(self.logger,"import failed - could not determine breed of debian-based distro")
776-
777- # debug log stuff for testing
778- #self.logger.info("DEBUG: self.pkgdir = %s" % str(self.pkgdir))
779- #self.logger.info("DEBUG: self.mirror = %s" % str(self.mirror))
780- #self.logger.info("DEBUG: self.mirror_name = %s" % str(self.mirror_name))
781- #self.logger.info("DEBUG: self.network_root = %s" % str(self.network_root))
782- #self.logger.info("DEBUG: self.kickstart_file = %s" % str(self.kickstart_file))
783- #self.logger.info("DEBUG: self.rsync_flags = %s" % str(self.rsync_flags))
784- #self.logger.info("DEBUG: self.arch = %s" % str(self.arch))
785- #self.logger.info("DEBUG: self.breed = %s" % str(self.breed))
786- #self.logger.info("DEBUG: self.os_version = %s" % str(self.os_version))
787-
788- # both --import and --name are required arguments
789-
790- if self.mirror is None:
791- utils.die(self.logger,"import failed. no --path specified")
792- if self.mirror_name is None:
793- utils.die(self.logger,"import failed. no --name specified")
794-
795- # if --arch is supplied, validate it to ensure it's valid
796-
797- if self.arch is not None and self.arch != "":
798- self.arch = self.arch.lower()
799- if self.arch == "x86":
800- # be consistent
801- self.arch = "i386"
802- if self.arch not in self.get_valid_arches():
803- utils.die(self.logger,"arch must be one of: %s" % string.join(self.get_valid_arches(),", "))
804-
805- # if we're going to do any copying, set where to put things
806- # and then make sure nothing is already there.
807-
808- self.path = os.path.normpath( "%s/ks_mirror/%s" % (self.settings.webdir, self.mirror_name) )
809- if os.path.exists(self.path) and self.arch is None:
810- # FIXME : Raise exception even when network_root is given ?
811- utils.die(self.logger,"Something already exists at this import location (%s). You must specify --arch to avoid potentially overwriting existing files." % self.path)
812-
813- # import takes a --kickstart for forcing selection that can't be used in all circumstances
814-
815- if self.kickstart_file and not self.breed:
816- utils.die(self.logger,"Kickstart file can only be specified when a specific breed is selected")
817-
818- if self.os_version and not self.breed:
819- utils.die(self.logger,"OS version can only be specified when a specific breed is selected")
820-
821- if self.breed and self.breed.lower() not in self.get_valid_breeds():
822- utils.die(self.logger,"Supplied import breed is not supported by this module")
823-
824- # if --arch is supplied, make sure the user is not importing a path with a different
825- # arch, which would just be silly.
826-
827- if self.arch:
828- # append the arch path to the name if the arch is not already
829- # found in the name.
830- for x in self.get_valid_arches():
831- if self.path.lower().find(x) != -1:
832- if self.arch != x :
833- utils.die(self.logger,"Architecture found on pathname (%s) does not fit the one given in command line (%s)"%(x,self.arch))
834- break
835- else:
836- # FIXME : This is very likely removed later at get_proposed_name, and the guessed arch appended again
837- self.path += ("-%s" % self.arch)
838-
839- # make the output path and mirror content but only if not specifying that a network
840- # accessible support location already exists (this is --available-as on the command line)
841-
842- if self.network_root is None:
843- # we need to mirror (copy) the files
844-
845- utils.mkdir(self.path)
846-
847- if self.mirror.startswith("http://") or self.mirror.startswith("ftp://") or self.mirror.startswith("nfs://"):
848-
849- # http mirrors are kind of primative. rsync is better.
850- # that's why this isn't documented in the manpage and we don't support them.
851- # TODO: how about adding recursive FTP as an option?
852-
853- utils.die(self.logger,"unsupported protocol")
854-
855- else:
856-
857- # good, we're going to use rsync..
858- # we don't use SSH for public mirrors and local files.
859- # presence of user@host syntax means use SSH
860-
861- # kick off the rsync now
862-
863- if not utils.rsync_files(self.mirror, self.path, self.rsync_flags, self.logger):
864- utils.die(self.logger, "failed to rsync the files")
865-
866- else:
867-
868- # rather than mirroring, we're going to assume the path is available
869- # over http, ftp, and nfs, perhaps on an external filer. scanning still requires
870- # --mirror is a filesystem path, but --available-as marks the network path
871-
872- if not os.path.exists(self.mirror):
873- utils.die(self.logger, "path does not exist: %s" % self.mirror)
874-
875- # find the filesystem part of the path, after the server bits, as each distro
876- # URL needs to be calculated relative to this.
877-
878- if not self.network_root.endswith("/"):
879- self.network_root = self.network_root + "/"
880- self.path = os.path.normpath( self.mirror )
881- valid_roots = [ "nfs://", "ftp://", "http://" ]
882- for valid_root in valid_roots:
883- if self.network_root.startswith(valid_root):
884- break
885- else:
886- utils.die(self.logger, "Network root given to --available-as must be nfs://, ftp://, or http://")
887- if self.network_root.startswith("nfs://"):
888- try:
889- (a,b,rest) = self.network_root.split(":",3)
890- except:
891- utils.die(self.logger, "Network root given to --available-as is missing a colon, please see the manpage example.")
892-
893- # now walk the filesystem looking for distributions that match certain patterns
894-
895- self.logger.info("adding distros")
896- distros_added = []
897- # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
898- os.path.walk(self.path, self.distro_adder, distros_added)
899-
900- # find out if we can auto-create any repository records from the install tree
901-
902- if self.network_root is None:
903- self.logger.info("associating repos")
904- # FIXME: this automagic is not possible (yet) without mirroring
905- self.repo_finder(distros_added)
906-
907- # find the most appropriate answer files for each profile object
908-
909- self.logger.info("associating kickstarts")
910- self.kickstart_finder(distros_added)
911-
912- # ensure bootloaders are present
913- self.api.pxegen.copy_bootloaders()
914-
915- return True
916-
917- # required function for import modules
918- def get_valid_arches(self):
919- return ["i386", "ppc", "x86_64", "x86",]
920-
921- # required function for import modules
922- def get_valid_breeds(self):
923- return ["debian","ubuntu"]
924-
925- # required function for import modules
926- def get_valid_os_versions(self):
927- if self.breed == "debian":
928- return ["etch", "lenny", "squeeze", "sid", "stable", "testing", "unstable", "experimental",]
929- elif self.breed == "ubuntu":
930- return ["dapper", "hardy", "karmic", "lucid", "maverick", "natty",]
931- else:
932- return []
933-
934- def get_valid_repo_breeds(self):
935- return ["apt",]
936-
937- def get_release_files(self):
938- """
939- Find distro release packages.
940- """
941- return glob.glob(os.path.join(self.get_rootdir(), "dists/*"))
942-
943- def get_breed_from_directory(self):
944- for breed in self.get_valid_breeds():
945- # NOTE : Although we break the loop after the first match,
946- # multiple debian derived distros can actually live at the same pool -- JP
947- d = os.path.join(self.mirror, breed)
948- if (os.path.islink(d) and os.path.isdir(d) and os.path.realpath(d) == os.path.realpath(self.mirror)) or os.path.basename(self.mirror) == breed:
949- return breed
950- else:
951- return None
952-
953- def get_tree_location(self, distro):
954- """
955- Once a distribution is identified, find the part of the distribution
956- that has the URL in it that we want to use for kickstarting the
957- distribution, and create a ksmeta variable $tree that contains this.
958- """
959-
960- base = self.get_rootdir()
961-
962- if self.network_root is None:
963- dists_path = os.path.join(self.path, "dists")
964- if os.path.isdir(dists_path):
965- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
966- else:
967- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
968- self.set_install_tree(distro, tree)
969- else:
970- # where we assign the kickstart source is relative to our current directory
971- # and the input start directory in the crawl. We find the path segments
972- # between and tack them on the network source path to find the explicit
973- # network path to the distro that Anaconda can digest.
974- tail = self.path_tail(self.path, base)
975- tree = self.network_root[:-1] + tail
976- self.set_install_tree(distro, tree)
977-
978- return
979-
980- def repo_finder(self, distros_added):
981- for distro in distros_added:
982- self.logger.info("traversing distro %s" % distro.name)
983- # FIXME : Shouldn't decide this the value of self.network_root ?
984- if distro.kernel.find("ks_mirror") != -1:
985- basepath = os.path.dirname(distro.kernel)
986- top = self.get_rootdir()
987- self.logger.info("descent into %s" % top)
988- dists_path = os.path.join(self.path, "dists")
989- if not os.path.isdir(dists_path):
990- self.process_repos()
991- else:
992- self.logger.info("this distro isn't mirrored")
993-
994- def process_repos(self):
995- pass
996-
997- def distro_adder(self,distros_added,dirname,fnames):
998- """
999- This is an os.path.walk routine that finds distributions in the directory
1000- to be scanned and then creates them.
1001- """
1002-
1003- # FIXME: If there are more than one kernel or initrd image on the same directory,
1004- # results are unpredictable
1005-
1006- initrd = None
1007- kernel = None
1008-
1009- for x in fnames:
1010- adtls = []
1011-
1012- fullname = os.path.join(dirname,x)
1013- if os.path.islink(fullname) and os.path.isdir(fullname):
1014- if fullname.startswith(self.path):
1015- self.logger.warning("avoiding symlink loop")
1016- continue
1017- self.logger.info("following symlink: %s" % fullname)
1018- os.path.walk(fullname, self.distro_adder, distros_added)
1019-
1020- if ( x.startswith("initrd") or x.startswith("ramdisk.image.gz") or x.startswith("vmkboot.gz") ) and x != "initrd.size":
1021- initrd = os.path.join(dirname,x)
1022- if ( x.startswith("vmlinu") or x.startswith("kernel.img") or x.startswith("linux") or x.startswith("mboot.c32") ) and x.find("initrd") == -1:
1023- kernel = os.path.join(dirname,x)
1024-
1025- # if we've collected a matching kernel and initrd pair, turn the in and add them to the list
1026- if initrd is not None and kernel is not None:
1027- adtls.append(self.add_entry(dirname,kernel,initrd))
1028- kernel = None
1029- initrd = None
1030-
1031- for adtl in adtls:
1032- distros_added.extend(adtl)
1033-
1034- def add_entry(self,dirname,kernel,initrd):
1035- """
1036- When we find a directory with a valid kernel/initrd in it, create the distribution objects
1037- as appropriate and save them. This includes creating xen and rescue distros/profiles
1038- if possible.
1039- """
1040-
1041- proposed_name = self.get_proposed_name(dirname,kernel)
1042- proposed_arch = self.get_proposed_arch(dirname)
1043-
1044- if self.arch and proposed_arch and self.arch != proposed_arch:
1045- utils.die(self.logger,"Arch from pathname (%s) does not match with supplied one %s"%(proposed_arch,self.arch))
1046-
1047- archs = self.learn_arch_from_tree()
1048- if not archs:
1049- if self.arch:
1050- archs.append( self.arch )
1051- else:
1052- if self.arch and self.arch not in archs:
1053- utils.die(self.logger, "Given arch (%s) not found on imported tree %s"%(self.arch,self.get_pkgdir()))
1054- if proposed_arch:
1055- if archs and proposed_arch not in archs:
1056- self.logger.warning("arch from pathname (%s) not found on imported tree %s" % (proposed_arch,self.get_pkgdir()))
1057- return
1058-
1059- archs = [ proposed_arch ]
1060-
1061- if len(archs)>1:
1062- self.logger.warning("- Warning : Multiple archs found : %s" % (archs))
1063-
1064- distros_added = []
1065-
1066- for pxe_arch in archs:
1067- name = proposed_name + "-" + pxe_arch
1068- existing_distro = self.distros.find(name=name)
1069-
1070- if existing_distro is not None:
1071- self.logger.warning("skipping import, as distro name already exists: %s" % name)
1072- continue
1073-
1074- else:
1075- self.logger.info("creating new distro: %s" % name)
1076- distro = self.config.new_distro()
1077-
1078- if name.find("-autoboot") != -1:
1079- # this is an artifact of some EL-3 imports
1080- continue
1081-
1082- distro.set_name(name)
1083- distro.set_kernel(kernel)
1084- distro.set_initrd(initrd)
1085- distro.set_arch(pxe_arch)
1086- distro.set_breed(self.breed)
1087- # If a version was supplied on command line, we set it now
1088- if self.os_version:
1089- distro.set_os_version(self.os_version)
1090-
1091- self.distros.add(distro,save=True)
1092- distros_added.append(distro)
1093-
1094- existing_profile = self.profiles.find(name=name)
1095-
1096- # see if the profile name is already used, if so, skip it and
1097- # do not modify the existing profile
1098-
1099- if existing_profile is None:
1100- self.logger.info("creating new profile: %s" % name)
1101- #FIXME: The created profile holds a default kickstart, and should be breed specific
1102- profile = self.config.new_profile()
1103- else:
1104- self.logger.info("skipping existing profile, name already exists: %s" % name)
1105- continue
1106-
1107- # save our minimal profile which just points to the distribution and a good
1108- # default answer file
1109-
1110- profile.set_name(name)
1111- profile.set_distro(name)
1112- profile.set_kickstart(self.kickstart_file)
1113-
1114- # depending on the name of the profile we can define a good virt-type
1115- # for usage with koan
1116-
1117- if name.find("-xen") != -1:
1118- profile.set_virt_type("xenpv")
1119- elif name.find("vmware") != -1:
1120- profile.set_virt_type("vmware")
1121- else:
1122- profile.set_virt_type("qemu")
1123-
1124- # save our new profile to the collection
1125-
1126- self.profiles.add(profile,save=True)
1127-
1128- return distros_added
1129-
1130- def get_proposed_name(self,dirname,kernel=None):
1131- """
1132- Given a directory name where we have a kernel/initrd pair, try to autoname
1133- the distribution (and profile) object based on the contents of that path
1134- """
1135-
1136- if self.network_root is not None:
1137- name = self.mirror_name + "-".join(self.path_tail(os.path.dirname(self.path),dirname).split("/"))
1138- else:
1139- # remove the part that says /var/www/cobbler/ks_mirror/name
1140- name = "-".join(dirname.split("/")[5:])
1141-
1142- if kernel is not None and kernel.find("PAE") != -1:
1143- name = name + "-PAE"
1144-
1145- # These are all Ubuntu's doing, the netboot images are buried pretty
1146- # deep. ;-) -JC
1147- name = name.replace("-netboot","")
1148- name = name.replace("-ubuntu-installer","")
1149- name = name.replace("-amd64","")
1150- name = name.replace("-i386","")
1151-
1152- # we know that some kernel paths should not be in the name
1153-
1154- name = name.replace("-images","")
1155- name = name.replace("-pxeboot","")
1156- name = name.replace("-install","")
1157- name = name.replace("-isolinux","")
1158-
1159- # some paths above the media root may have extra path segments we want
1160- # to clean up
1161-
1162- name = name.replace("-os","")
1163- name = name.replace("-tree","")
1164- name = name.replace("var-www-cobbler-", "")
1165- name = name.replace("ks_mirror-","")
1166- name = name.replace("--","-")
1167-
1168- # remove any architecture name related string, as real arch will be appended later
1169-
1170- name = name.replace("chrp","ppc64")
1171-
1172- for separator in [ '-' , '_' , '.' ] :
1173- for arch in [ "i386" , "x86_64" , "ia64" , "ppc64", "ppc32", "ppc", "x86" , "s390x", "s390" , "386" , "amd" ]:
1174- name = name.replace("%s%s" % ( separator , arch ),"")
1175-
1176- return name
1177-
1178- def get_proposed_arch(self,dirname):
1179- """
1180- Given an directory name, can we infer an architecture from a path segment?
1181- """
1182- if dirname.find("x86_64") != -1 or dirname.find("amd") != -1:
1183- return "x86_64"
1184- if dirname.find("ia64") != -1:
1185- return "ia64"
1186- if dirname.find("i386") != -1 or dirname.find("386") != -1 or dirname.find("x86") != -1:
1187- return "i386"
1188- if dirname.find("s390x") != -1:
1189- return "s390x"
1190- if dirname.find("s390") != -1:
1191- return "s390"
1192- if dirname.find("ppc64") != -1 or dirname.find("chrp") != -1:
1193- return "ppc64"
1194- if dirname.find("ppc32") != -1:
1195- return "ppc"
1196- if dirname.find("ppc") != -1:
1197- return "ppc"
1198- return None
1199-
1200- def arch_walker(self,foo,dirname,fnames):
1201- """
1202- See docs on learn_arch_from_tree.
1203-
1204- The TRY_LIST is used to speed up search, and should be dropped for default importer
1205- Searched kernel names are kernel-header, linux-headers-, kernel-largesmp, kernel-hugemem
1206-
1207- This method is useful to get the archs, but also to package type and a raw guess of the breed
1208- """
1209-
1210- # try to find a kernel header RPM and then look at it's arch.
1211- for x in fnames:
1212- if self.match_kernelarch_file(x):
1213- for arch in self.get_valid_arches():
1214- if x.find(arch) != -1:
1215- foo[arch] = 1
1216- for arch in [ "i686" , "amd64" ]:
1217- if x.find(arch) != -1:
1218- foo[arch] = 1
1219-
1220- def kickstart_finder(self,distros_added):
1221- """
1222- For all of the profiles in the config w/o a kickstart, use the
1223- given kickstart file, or look at the kernel path, from that,
1224- see if we can guess the distro, and if we can, assign a kickstart
1225- if one is available for it.
1226- """
1227- for profile in self.profiles:
1228- distro = self.distros.find(name=profile.get_conceptual_parent().name)
1229- if distro is None or not (distro in distros_added):
1230- continue
1231-
1232- kdir = os.path.dirname(distro.kernel)
1233- if self.kickstart_file == None:
1234- for file in self.get_release_files():
1235- results = self.scan_pkg_filename(file)
1236- # FIXME : If os is not found on tree but set with CLI, no kickstart is searched
1237- if results is None:
1238- self.logger.warning("skipping %s" % file)
1239- continue
1240- (flavor, major, minor, release) = results
1241- # Why use set_variance()? scan_pkg_filename() does everything we need now - jcammarata
1242- #version , ks = self.set_variance(flavor, major, minor, distro.arch)
1243- if self.os_version:
1244- if self.os_version != flavor:
1245- utils.die(self.logger,"CLI version differs from tree : %s vs. %s" % (self.os_version,flavor))
1246- distro.set_comment("%s %s (%s.%s.%s) %s" % (self.breed,flavor,major,minor,release,self.arch))
1247- distro.set_os_version(flavor)
1248- # is this even valid for debian/ubuntu? - jcammarata
1249- #ds = self.get_datestamp()
1250- #if ds is not None:
1251- # distro.set_tree_build_time(ds)
1252- profile.set_kickstart("/var/lib/cobbler/kickstarts/sample.seed")
1253- self.profiles.add(profile,save=True)
1254-
1255- self.configure_tree_location(distro)
1256- self.distros.add(distro,save=True) # re-save
1257- self.api.serialize()
1258-
1259- def configure_tree_location(self, distro):
1260- """
1261- Once a distribution is identified, find the part of the distribution
1262- that has the URL in it that we want to use for kickstarting the
1263- distribution, and create a ksmeta variable $tree that contains this.
1264- """
1265-
1266- base = self.get_rootdir()
1267-
1268- if self.network_root is None:
1269- dists_path = os.path.join( self.path , "dists" )
1270- if os.path.isdir( dists_path ):
1271- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
1272- else:
1273- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
1274- self.set_install_tree(distro, tree)
1275- else:
1276- # where we assign the kickstart source is relative to our current directory
1277- # and the input start directory in the crawl. We find the path segments
1278- # between and tack them on the network source path to find the explicit
1279- # network path to the distro that Anaconda can digest.
1280- tail = utils.path_tail(self.path, base)
1281- tree = self.network_root[:-1] + tail
1282- self.set_install_tree(distro, tree)
1283-
1284- def get_rootdir(self):
1285- return self.mirror
1286-
1287- def get_pkgdir(self):
1288- if not self.pkgdir:
1289- return None
1290- return os.path.join(self.get_rootdir(),self.pkgdir)
1291-
1292- def set_install_tree(self, distro, url):
1293- distro.ks_meta["tree"] = url
1294-
1295- def learn_arch_from_tree(self):
1296- """
1297- If a distribution is imported from DVD, there is a good chance the path doesn't
1298- contain the arch and we should add it back in so that it's part of the
1299- meaningful name ... so this code helps figure out the arch name. This is important
1300- for producing predictable distro names (and profile names) from differing import sources
1301- """
1302- result = {}
1303- # FIXME : this is called only once, should not be a walk
1304- if self.get_pkgdir():
1305- os.path.walk(self.get_pkgdir(), self.arch_walker, result)
1306- if result.pop("amd64",False):
1307- result["x86_64"] = 1
1308- if result.pop("i686",False):
1309- result["i386"] = 1
1310- return result.keys()
1311-
1312- def match_kernelarch_file(self, filename):
1313- """
1314- Is the given filename a kernel filename?
1315- """
1316- if not filename.endswith("deb"):
1317- return False
1318- if filename.startswith("linux-headers-"):
1319- return True
1320- return False
1321-
1322- def scan_pkg_filename(self, file):
1323- """
1324- Determine what the distro is based on the release package filename.
1325- """
1326- # FIXME: all of these dist_names should probably be put in a function
1327- # which would be called in place of looking in codes.py. Right now
1328- # you have to update both codes.py and this to add a new release
1329- if self.breed == "debian":
1330- dist_names = ['etch','lenny',]
1331- elif self.breed == "ubuntu":
1332-            dist_names = ['dapper','hardy','intrepid','jaunty','karmic','lucid','maverick','natty',]
1333- else:
1334- return None
1335-
1336- if os.path.basename(file) in dist_names:
1337- release_file = os.path.join(file,'Release')
1338- self.logger.info("Found %s release file: %s" % (self.breed,release_file))
1339-
1340- f = open(release_file,'r')
1341- lines = f.readlines()
1342- f.close()
1343-
1344- for line in lines:
1345- if line.lower().startswith('version: '):
1346- version = line.split(':')[1].strip()
1347- values = version.split('.')
1348- if len(values) == 1:
1349- # I don't think you'd ever hit this currently with debian or ubuntu,
1350- # just including it for safety reasons
1351- return (os.path.basename(file), values[0], "0", "0")
1352- elif len(values) == 2:
1353- return (os.path.basename(file), values[0], values[1], "0")
1354- elif len(values) > 2:
1355- return (os.path.basename(file), values[0], values[1], values[2])
1356- return None
1357-
1358- def get_datestamp(self):
1359- """
1360- Not used for debian/ubuntu... should probably be removed? - jcammarata
1361- """
1362- pass
1363-
1364- def set_variance(self, flavor, major, minor, arch):
1365- """
1366- Set distro specific versioning.
1367- """
1368- # I don't think this is required anymore, as the scan_pkg_filename() function
1369- # above does everything we need it to - jcammarata
1370- #
1371- #if self.breed == "debian":
1372- # dist_names = { '4.0' : "etch" , '5.0' : "lenny" }
1373- # dist_vers = "%s.%s" % ( major , minor )
1374- # os_version = dist_names[dist_vers]
1375- #
1376- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
1377- #elif self.breed == "ubuntu":
1378- # # Release names taken from wikipedia
1379- # dist_names = { '6.4' :"dapper",
1380- # '8.4' :"hardy",
1381- # '8.10' :"intrepid",
1382- # '9.4' :"jaunty",
1383- # '9.10' :"karmic",
1384- # '10.4' :"lynx",
1385- # '10.10':"maverick",
1386- # '11.4' :"natty",
1387- # }
1388- # dist_vers = "%s.%s" % ( major , minor )
1389- # if not dist_names.has_key( dist_vers ):
1390- # dist_names['4ubuntu2.0'] = "IntrepidIbex"
1391- # os_version = dist_names[dist_vers]
1392- #
1393- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
1394- #else:
1395- # return None
1396- pass
1397-
1398- def process_repos(self, main_importer, distro):
1399- # Create a disabled repository for the new distro, and the security updates
1400- #
1401- # NOTE : We cannot use ks_meta nor os_version because they get fixed at a later stage
1402-
1403- repo = item_repo.Repo(main_importer.config)
1404- repo.set_breed( "apt" )
1405- repo.set_arch( distro.arch )
1406- repo.set_keep_updated( False )
1407- repo.yumopts["--ignore-release-gpg"] = None
1408- repo.yumopts["--verbose"] = None
1409- repo.set_name( distro.name )
1410- repo.set_os_version( distro.os_version )
1411- # NOTE : The location of the mirror should come from timezone
1412- repo.set_mirror( "http://ftp.%s.debian.org/debian/dists/%s" % ( 'us' , '@@suite@@' ) )
1413-
1414- security_repo = item_repo.Repo(main_importer.config)
1415- security_repo.set_breed( "apt" )
1416- security_repo.set_arch( distro.arch )
1417- security_repo.set_keep_updated( False )
1418- security_repo.yumopts["--ignore-release-gpg"] = None
1419- security_repo.yumopts["--verbose"] = None
1420- security_repo.set_name( distro.name + "-security" )
1421- security_repo.set_os_version( distro.os_version )
1422- # There are no official mirrors for security updates
1423- security_repo.set_mirror( "http://security.debian.org/debian-security/dists/%s/updates" % '@@suite@@' )
1424-
1425- self.logger.info("Added repos for %s" % distro.name)
1426- repos = main_importer.config.repos()
1427- repos.add(repo,save=True)
1428- repos.add(security_repo,save=True)
1429-
1430-# ==========================================================================
1431-
1432-def get_import_manager(config,logger):
1433- return ImportDebianUbuntuManager(config,logger)
1434
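For readers tracing the importer above, here is a minimal standalone sketch of the Release-file parsing that scan_pkg_filename() performs, assuming a plain-text dists/<codename>/Release file; the function name and the sample version string are illustrative, not part of the package.

    def parse_release_version(release_file, codename):
        # Read the "Version:" line of a Release file and split it into
        # (flavor, major, minor, release), padding missing components with
        # "0" -- equivalent to the branching done in scan_pkg_filename().
        f = open(release_file, 'r')
        try:
            for line in f:
                if line.lower().startswith('version: '):
                    values = line.split(':')[1].strip().split('.') + ['0', '0']
                    return (codename, values[0], values[1], values[2])
        finally:
            f.close()
        return None

    # e.g. a Release file containing "Version: 10.04.3" under dists/lucid
    # yields ('lucid', '10', '04', '3')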
1435=== removed directory '.pc/33_authn_configfile.patch'
1436=== removed directory '.pc/33_authn_configfile.patch/config'
1437=== removed file '.pc/33_authn_configfile.patch/config/modules.conf'
1438--- .pc/33_authn_configfile.patch/config/modules.conf 2011-04-04 12:55:44 +0000
1439+++ .pc/33_authn_configfile.patch/config/modules.conf 1970-01-01 00:00:00 +0000
1440@@ -1,86 +0,0 @@
1441-# cobbler module configuration file
1442-# =================================
1443-
1444-# authentication:
1445-# what users can log into the WebUI and Read-Write XMLRPC?
1446-# choices:
1447-# authn_denyall -- no one (default)
1448-# authn_configfile -- use /etc/cobbler/users.digest (for basic setups)
1449-# authn_passthru -- ask Apache to handle it (used for kerberos)
1450-# authn_ldap -- authenticate against LDAP
1451-# authn_spacewalk -- ask Spacewalk/Satellite (experimental)
1452-# authn_testing -- username/password is always testing/testing (debug)
1453-# (user supplied) -- you may write your own module
1454-# WARNING: this is a security setting, do not choose an option blindly.
1455-# for more information:
1456-# https://fedorahosted.org/cobbler/wiki/CobblerWebInterface
1457-# https://fedorahosted.org/cobbler/wiki/CustomizableSecurity
1458-# https://fedorahosted.org/cobbler/wiki/CobblerWithKerberos
1459-# https://fedorahosted.org/cobbler/wiki/CobblerWithLdap
1460-
1461-[authentication]
1462-module = authn_denyall
1463-
1464-# authorization:
1465-# once a user has been cleared by the WebUI/XMLRPC, what can they do?
1466-# choices:
1467-#   authz_allowall     -- full access for all authenticated users (default)
1468-# authz_ownership -- use users.conf, but add object ownership semantics
1469-# (user supplied) -- you may write your own module
1470-# WARNING: this is a security setting, do not choose an option blindly.
1471-# If you want to further restrict cobbler with ACLs for various groups,
1472-# pick authz_ownership. authz_allowall does not support ACLs. configfile
1473-# does but does not support object ownership which is useful as an additional
1474-# layer of control.
1475-
1476-# for more information:
1477-# https://fedorahosted.org/cobbler/wiki/CobblerWebInterface
1478-# https://fedorahosted.org/cobbler/wiki/CustomizableSecurity
1479-# https://fedorahosted.org/cobbler/wiki/CustomizableAuthorization
1480-# https://fedorahosted.org/cobbler/wiki/AuthorizationWithOwnership
1481-# https://fedorahosted.org/cobbler/wiki/AclFeature
1482-
1483-[authorization]
1484-module = authz_allowall
1485-
1486-# dns:
1487-# chooses the DNS management engine if manage_dns is enabled
1488-# in /etc/cobbler/settings, which is off by default.
1489-# choices:
1490-# manage_bind -- default, uses BIND/named
1491-# manage_dnsmasq -- uses dnsmasq, also must select dnsmasq for dhcp below
1492-# NOTE: more configuration is still required in /etc/cobbler
1493-# for more information:
1494-# https://fedorahosted.org/cobbler/wiki/ManageDns
1495-
1496-[dns]
1497-module = manage_bind
1498-
1499-# dhcp:
1500-# chooses the DHCP management engine if manage_dhcp is enabled
1501-# in /etc/cobbler/settings, which is off by default.
1502-# choices:
1503-# manage_isc -- default, uses ISC dhcpd
1504-# manage_dnsmasq -- uses dnsmasq, also must select dnsmasq for dns above
1505-# NOTE: more configuration is still required in /etc/cobbler
1506-# for more information:
1507-# https://fedorahosted.org/cobbler/wiki/ManageDhcp
1508-
1509-[dhcp]
1510-module = manage_isc
1511-
1512-# tftpd:
1513-# chooses the TFTP management engine if manage_tftp is enabled
1514-# in /etc/cobbler/settings, which is ON by default.
1515-#
1516-# choices:
1517-# manage_in_tftpd -- default, uses the system's tftp server
1518-# manage_tftpd_py -- uses cobbler's tftp server
1519-#
1520-# for more information:
1521-# https://fedorahosted.org/cobbler/wiki/ManageTftp
1522-
1523-[tftpd]
1524-module = manage_in_tftpd
1525-
1526-#--------------------------------------------------
1527
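As a rough illustration of how the [authentication], [authorization], [dns], [dhcp] and [tftpd] choices above get consumed, the sketch below reads a section's module name with the standard-library ConfigParser. This is not cobbler's own module_loader API; the helper name and default path are assumptions for the example.

    import ConfigParser  # Python 2 standard library, matching this code base

    def configured_module(section, path="/etc/cobbler/modules.conf"):
        # Return the value of the "module" option for the given section,
        # e.g. configured_module("dns") -> "manage_bind" with the defaults above.
        cp = ConfigParser.ConfigParser()
        cp.read(path)
        return cp.get(section, "module")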
1528=== removed directory '.pc/34_fix_apache_wont_start.patch'
1529=== removed directory '.pc/34_fix_apache_wont_start.patch/config'
1530=== removed file '.pc/34_fix_apache_wont_start.patch/config/cobbler_web.conf'
1531--- .pc/34_fix_apache_wont_start.patch/config/cobbler_web.conf 2011-04-04 12:55:44 +0000
1532+++ .pc/34_fix_apache_wont_start.patch/config/cobbler_web.conf 1970-01-01 00:00:00 +0000
1533@@ -1,14 +0,0 @@
1534-# This configuration file enables the cobbler web
1535-# interface (django version)
1536-
1537-<VirtualHost *:80>
1538-
1539-# Do not log the requests generated from the event notification system
1540-SetEnvIf Request_URI ".*/op/events/user/.*" dontlog
1541-# Log only what remains
1542-CustomLog logs/access_log combined env=!dontlog
1543-
1544-WSGIScriptAlias /cobbler_web /usr/share/cobbler/web/cobbler.wsgi
1545-
1546-</VirtualHost>
1547-
1548
1549=== removed directory '.pc/39_cw_remove_vhost.patch'
1550=== removed directory '.pc/39_cw_remove_vhost.patch/config'
1551=== removed file '.pc/39_cw_remove_vhost.patch/config/cobbler_web.conf'
1552--- .pc/39_cw_remove_vhost.patch/config/cobbler_web.conf 2011-04-15 12:47:39 +0000
1553+++ .pc/39_cw_remove_vhost.patch/config/cobbler_web.conf 1970-01-01 00:00:00 +0000
1554@@ -1,14 +0,0 @@
1555-# This configuration file enables the cobbler web
1556-# interface (django version)
1557-
1558-<VirtualHost *:80>
1559-
1560-# Do not log the requests generated from the event notification system
1561-SetEnvIf Request_URI ".*/op/events/user/.*" dontlog
1562-# Log only what remains
1563-#CustomLog logs/access_log combined env=!dontlog
1564-
1565-WSGIScriptAlias /cobbler_web /usr/share/cobbler/web/cobbler.wsgi
1566-
1567-</VirtualHost>
1568-
1569
1570=== removed directory '.pc/40_ubuntu_bind9_management.patch'
1571=== removed directory '.pc/40_ubuntu_bind9_management.patch/cobbler'
1572=== removed file '.pc/40_ubuntu_bind9_management.patch/cobbler/action_check.py'
1573--- .pc/40_ubuntu_bind9_management.patch/cobbler/action_check.py 2011-04-18 11:15:59 +0000
1574+++ .pc/40_ubuntu_bind9_management.patch/cobbler/action_check.py 1970-01-01 00:00:00 +0000
1575@@ -1,482 +0,0 @@
1576-"""
1577-Validates whether the system is reasonably well configured for
1578-serving up content. This is the code behind 'cobbler check'.
1579-
1580-Copyright 2006-2009, Red Hat, Inc
1581-Michael DeHaan <mdehaan@redhat.com>
1582-
1583-This program is free software; you can redistribute it and/or modify
1584-it under the terms of the GNU General Public License as published by
1585-the Free Software Foundation; either version 2 of the License, or
1586-(at your option) any later version.
1587-
1588-This program is distributed in the hope that it will be useful,
1589-but WITHOUT ANY WARRANTY; without even the implied warranty of
1590-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1591-GNU General Public License for more details.
1592-
1593-You should have received a copy of the GNU General Public License
1594-along with this program; if not, write to the Free Software
1595-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
1596-02110-1301 USA
1597-"""
1598-
1599-import os
1600-import re
1601-import action_sync
1602-import utils
1603-import glob
1604-from utils import _
1605-import clogger
1606-
1607-class BootCheck:
1608-
1609- def __init__(self,config,logger=None):
1610- """
1611- Constructor
1612- """
1613- self.config = config
1614- self.settings = config.settings()
1615- if logger is None:
1616- logger = clogger.Logger()
1617- self.logger = logger
1618-
1619-
1620- def run(self):
1621- """
1622- Returns None if there are no errors, otherwise returns a list
1623- of things to correct prior to running application 'for real'.
1624- (The CLI usage is "cobbler check" before "cobbler sync")
1625- """
1626- status = []
1627- self.checked_dist = utils.check_dist()
1628- self.check_name(status)
1629- self.check_selinux(status)
1630- if self.settings.manage_dhcp:
1631- mode = self.config.api.get_sync().dhcp.what()
1632- if mode == "isc":
1633- self.check_dhcpd_bin(status)
1634- self.check_dhcpd_conf(status)
1635- self.check_service(status,"dhcpd")
1636- elif mode == "dnsmasq":
1637- self.check_dnsmasq_bin(status)
1638- self.check_service(status,"dnsmasq")
1639-
1640- if self.settings.manage_dns:
1641- mode = self.config.api.get_sync().dns.what()
1642- if mode == "bind":
1643- self.check_bind_bin(status)
1644- self.check_service(status,"named")
1645- elif mode == "dnsmasq" and not self.settings.manage_dhcp:
1646- self.check_dnsmasq_bin(status)
1647- self.check_service(status,"dnsmasq")
1648-
1649- mode = self.config.api.get_sync().tftpd.what()
1650- if mode == "in_tftpd":
1651- self.check_tftpd_bin(status)
1652- self.check_tftpd_dir(status)
1653- self.check_tftpd_conf(status)
1654- elif mode == "tftpd_py":
1655- self.check_ctftpd_bin(status)
1656- self.check_ctftpd_dir(status)
1657- self.check_ctftpd_conf(status)
1658-
1659- self.check_service(status, "cobblerd")
1660-
1661- self.check_bootloaders(status)
1662- self.check_rsync_conf(status)
1663- self.check_httpd(status)
1664- self.check_iptables(status)
1665- self.check_yum(status)
1666- self.check_debmirror(status)
1667- self.check_for_ksvalidator(status)
1668- self.check_for_default_password(status)
1669- self.check_for_unreferenced_repos(status)
1670- self.check_for_unsynced_repos(status)
1671- self.check_for_cman(status)
1672-
1673- return status
1674-
1675- def check_for_ksvalidator(self, status):
1676- if self.checked_dist in ["debian", "ubuntu"]:
1677- return
1678-
1679- if not os.path.exists("/usr/bin/ksvalidator"):
1680- status.append("ksvalidator was not found, install pykickstart")
1681-
1682- return True
1683-
1684- def check_for_cman(self, status):
1685- # not doing rpm -q here to be cross-distro friendly
1686- if not os.path.exists("/sbin/fence_ilo") and not os.path.exists("/usr/sbin/fence_ilo"):
1687- status.append("fencing tools were not found, and are required to use the (optional) power management features. install cman or fence-agents to use them")
1688- return True
1689-
1690- def check_service(self, status, which, notes=""):
1691- if notes != "":
1692- notes = " (NOTE: %s)" % notes
1693- rc = 0
1694- if self.checked_dist == "redhat" or self.checked_dist == "suse":
1695- if os.path.exists("/etc/rc.d/init.d/%s" % which):
1696- rc = utils.subprocess_call(self.logger,"/sbin/service %s status > /dev/null 2>/dev/null" % which, shell=True)
1697- if rc != 0:
1698- status.append(_("service %s is not running%s") % (which,notes))
1699- return False
1700- elif self.checked_dist in ["debian", "ubuntu"]:
1701- # we still use /etc/init.d
1702- if os.path.exists("/etc/init.d/%s" % which):
1703-                rc = utils.subprocess_call(self.logger,"/etc/init.d/%s status > /dev/null 2>/dev/null" % which, shell=True)
1704- if rc != 0:
1705-                status.append(_("service %s is not running%s") % (which,notes))
1706- return False
1707- elif self.checked_dist == "ubuntu":
1708- if os.path.exists("/etc/init/%s.conf" % which):
1709- rc = utils.subprocess_call(self.logger,"status %s > /dev/null 2>&1" % which, shell=True)
1710- if rc != 0:
1711- status.append(_("service %s is not running%s") % (which,notes))
1712- else:
1713- status.append(_("Unknown distribution type, cannot check for running service %s" % which))
1714- return False
1715- return True
1716-
1717- def check_iptables(self, status):
1718- if os.path.exists("/etc/rc.d/init.d/iptables"):
1719- rc = utils.subprocess_call(self.logger,"/sbin/service iptables status >/dev/null 2>/dev/null", shell=True)
1720- if rc == 0:
1721- status.append(_("since iptables may be running, ensure 69, 80, and %(xmlrpc)s are unblocked") % { "xmlrpc" : self.settings.xmlrpc_port })
1722-
1723- def check_yum(self,status):
1724- if self.checked_dist in ["debian", "ubuntu"]:
1725- return
1726-
1727- if not os.path.exists("/usr/bin/createrepo"):
1728- status.append(_("createrepo package is not installed, needed for cobbler import and cobbler reposync, install createrepo?"))
1729- if not os.path.exists("/usr/bin/reposync"):
1730-            status.append(_("reposync is not installed, needed for cobbler reposync, install/upgrade yum-utils?"))
1731- if not os.path.exists("/usr/bin/yumdownloader"):
1732- status.append(_("yumdownloader is not installed, needed for cobbler repo add with --rpm-list parameter, install/upgrade yum-utils?"))
1733-        if self.settings.reposync_flags.find("-l") != -1:
1734- if self.checked_dist == "redhat" or self.checked_dist == "suse":
1735- yum_utils_ver = utils.subprocess_get(self.logger,"/usr/bin/rpmquery --queryformat=%{VERSION} yum-utils", shell=True)
1736- if yum_utils_ver < "1.1.17":
1737-                    status.append(_("yum-utils needs to be at least version 1.1.17 for reposync -l, current version is %s") % yum_utils_ver )
1738-
1739- def check_debmirror(self,status):
1740- if not os.path.exists("/usr/bin/debmirror"):
1741- status.append(_("debmirror package is not installed, it will be required to manage debian deployments and repositories"))
1742- if os.path.exists("/etc/debmirror.conf"):
1743- f = open("/etc/debmirror.conf")
1744- re_dists = re.compile(r'@dists=')
1745- re_arches = re.compile(r'@arches=')
1746- for line in f.readlines():
1747- if re_dists.search(line) and not line.strip().startswith("#"):
1748-                    status.append(_("comment out 'dists' in /etc/debmirror.conf for proper debian support"))
1749-                if re_arches.search(line) and not line.strip().startswith("#"):
1750-                    status.append(_("comment out 'arches' in /etc/debmirror.conf for proper debian support"))
1751-
1752-
1753- def check_name(self,status):
1754- """
1755- If the server name in the config file is still set to localhost
1756- kickstarts run from koan will not have proper kernel line
1757- parameters.
1758- """
1759- if self.settings.server == "127.0.0.1":
1760- status.append(_("The 'server' field in /etc/cobbler/settings must be set to something other than localhost, or kickstarting features will not work. This should be a resolvable hostname or IP for the boot server as reachable by all machines that will use it."))
1761- if self.settings.next_server == "127.0.0.1":
1762- status.append(_("For PXE to be functional, the 'next_server' field in /etc/cobbler/settings must be set to something other than 127.0.0.1, and should match the IP of the boot server on the PXE network."))
1763-
1764- def check_selinux(self,status):
1765- """
1766- Suggests various SELinux rules changes to run Cobbler happily with
1767- SELinux in enforcing mode. FIXME: this method could use some
1768- refactoring in the future.
1769- """
1770- if self.checked_dist in ["debian", "ubuntu"]:
1771- return
1772-
1773- enabled = self.config.api.is_selinux_enabled()
1774- if enabled:
1775- data2 = utils.subprocess_get(self.logger,"/usr/sbin/getsebool -a",shell=True)
1776- for line in data2.split("\n"):
1777- if line.find("httpd_can_network_connect ") != -1:
1778- if line.find("off") != -1:
1779- status.append(_("Must enable a selinux boolean to enable vital web services components, run: setsebool -P httpd_can_network_connect true"))
1780- if line.find("rsync_disable_trans ") != -1:
1781- if line.find("on") != -1:
1782- status.append(_("Must enable the cobbler import and replicate commands, run: setsebool -P rsync_disable_trans=1"))
1783-
1784- data3 = utils.subprocess_get(self.logger,"/usr/sbin/semanage fcontext -l | grep public_content_t",shell=True)
1785-
1786- rule1 = False
1787- rule2 = False
1788- rule3 = False
1789- selinux_msg = "/usr/sbin/semanage fcontext -a -t public_content_t \"%s\""
1790- for line in data3.split("\n"):
1791- if line.startswith("/tftpboot/.*"):
1792- rule1 = True
1793- if line.startswith("/var/lib/tftpboot/.*"):
1794- rule2 = True
1795- if line.startswith("/var/www/cobbler/images/.*"):
1796- rule3 = True
1797-
1798- rules = []
1799- if os.path.exists("/tftpboot") and not rule1:
1800- rules.append(selinux_msg % "/tftpboot/.*")
1801- else:
1802- if not rule2:
1803- rules.append(selinux_msg % "/var/lib/tftpboot/.*")
1804- if not rule3:
1805- rules.append(selinux_msg % "/var/www/cobbler/images/.*")
1806- if len(rules) > 0:
1807- status.append("you need to set some SELinux content rules to ensure cobbler serves content correctly in your SELinux environment, run the following: %s" % " && ".join(rules))
1808-
1809- # now check to see that the Django sessions path is accessible
1810- # by Apache
1811-
1812- data4 = utils.subprocess_get(self.logger,"/usr/sbin/semanage fcontext -l | grep httpd_sys_content_rw_t",shell=True)
1813- selinux_msg = "you need to set some SELinux rules if you want to use cobbler-web (an optional package), run the following: /usr/sbin/semanage fcontext -a -t httpd_sys_content_rw_t \"%s\""
1814- rule4 = False
1815- for line in data4.split("\n"):
1816- if line.startswith("/var/lib/cobbler/webui_sessions/.*"):
1817- rule4 = True
1818- if not rule4:
1819- status.append(selinux_msg % "/var/lib/cobbler/webui_sessions/.*")
1820-
1821-
1822- def check_for_default_password(self,status):
1823- default_pass = self.settings.default_password_crypted
1824- if default_pass == "$1$mF86/UHC$WvcIcX2t6crBz2onWxyac.":
1825- status.append(_("The default password used by the sample templates for newly installed machines (default_password_crypted in /etc/cobbler/settings) is still set to 'cobbler' and should be changed, try: \"openssl passwd -1 -salt 'random-phrase-here' 'your-password-here'\" to generate new one"))
1826-
1827-
1828- def check_for_unreferenced_repos(self,status):
1829- repos = []
1830- referenced = []
1831- not_found = []
1832- for r in self.config.api.repos():
1833- repos.append(r.name)
1834- for p in self.config.api.profiles():
1835- my_repos = p.repos
1836- if my_repos != "<<inherit>>":
1837- referenced.extend(my_repos)
1838- for r in referenced:
1839- if r not in repos and r != "<<inherit>>":
1840- not_found.append(r)
1841- if len(not_found) > 0:
1842- status.append(_("One or more repos referenced by profile objects is no longer defined in cobbler: %s") % ", ".join(not_found))
1843-
1844- def check_for_unsynced_repos(self,status):
1845- need_sync = []
1846- for r in self.config.repos():
1847- if r.mirror_locally == 1:
1848- lookfor = os.path.join(self.settings.webdir, "repo_mirror", r.name)
1849- if not os.path.exists(lookfor):
1850- need_sync.append(r.name)
1851- if len(need_sync) > 0:
1852- status.append(_("One or more repos need to be processed by cobbler reposync for the first time before kickstarting against them: %s") % ", ".join(need_sync))
1853-
1854-
1855- def check_httpd(self,status):
1856- """
1857- Check if Apache is installed.
1858- """
1859- if self.checked_dist in [ "suse", "redhat" ]:
1860- rc = utils.subprocess_get(self.logger,"httpd -v")
1861- else:
1862- rc = utils.subprocess_get(self.logger,"apache2 -v")
1863- if rc.find("Server") == -1:
1864- status.append("Apache (httpd) is not installed and/or in path")
1865-
1866-
1867- def check_dhcpd_bin(self,status):
1868- """
1869- Check if dhcpd is installed
1870- """
1871- if not os.path.exists("/usr/sbin/dhcpd"):
1872- status.append("dhcpd is not installed")
1873-
1874- def check_dnsmasq_bin(self,status):
1875- """
1876- Check if dnsmasq is installed
1877- """
1878- rc = utils.subprocess_get(self.logger,"dnsmasq --help")
1879- if rc.find("Valid options") == -1:
1880- status.append("dnsmasq is not installed and/or in path")
1881-
1882- def check_bind_bin(self,status):
1883- """
1884- Check if bind is installed.
1885- """
1886- rc = utils.subprocess_get(self.logger,"named -v")
1887- # it should return something like "BIND 9.6.1-P1-RedHat-9.6.1-6.P1.fc11"
1888- if rc.find("BIND") == -1:
1889- status.append("named is not installed and/or in path")
1890-
1891- def check_bootloaders(self,status):
1892- """
1893- Check if network bootloaders are installed
1894- """
1895- # FIXME: move zpxe.rexx to loaders
1896-
1897- bootloaders = {
1898- "elilo" : [ "/var/lib/cobbler/loaders/elilo*.efi" ],
1899- "menu.c32" : [ "/usr/share/syslinux/menu.c32",
1900- "/usr/lib/syslinux/menu.c32",
1901- "/var/lib/cobbler/loaders/menu.c32" ],
1902- "yaboot" : [ "/var/lib/cobbler/loaders/yaboot*" ],
1903- "pxelinux.0" : [ "/usr/share/syslinux/pxelinux.0",
1904- "/usr/lib/syslinux/pxelinux.0",
1905- "/var/lib/cobbler/loaders/pxelinux.0" ],
1906- "efi" : [ "/var/lib/cobbler/loaders/grub-x86.efi",
1907- "/var/lib/cobbler/loaders/grub-x86_64.efi" ],
1908- }
1909-
1910- # look for bootloaders at the glob locations above
1911- found_bootloaders = []
1912- items = bootloaders.keys()
1913- for loader_name in items:
1914- patterns = bootloaders[loader_name]
1915- for pattern in patterns:
1916- matches = glob.glob(pattern)
1917- if len(matches) > 0:
1918- found_bootloaders.append(loader_name)
1919- not_found = []
1920-
1921- # invert the list of what we've found so we can report on what we haven't found
1922- for loader_name in items:
1923- if loader_name not in found_bootloaders:
1924- not_found.append(loader_name)
1925-
1926- if len(not_found) > 0:
1927-            status.append("some network boot-loaders are missing from /var/lib/cobbler/loaders, you may run 'cobbler get-loaders' to download them, or, if you only want to handle x86/x86_64 netbooting, you may ensure that you have a *recent* version of the syslinux package installed and can ignore this message entirely. Files in this directory, should you want to support all architectures, should include pxelinux.0, menu.c32, elilo.efi, and yaboot. The 'cobbler get-loaders' command is the easiest way to resolve these requirements.")
1928-
1929- def check_tftpd_bin(self,status):
1930- """
1931- Check if tftpd is installed
1932- """
1933- if self.checked_dist in ["debian", "ubuntu"]:
1934- return
1935-
1936- if not os.path.exists("/etc/xinetd.d/tftp"):
1937- status.append("missing /etc/xinetd.d/tftp, install tftp-server?")
1938-
1939- def check_tftpd_dir(self,status):
1940- """
1941- Check if cobbler.conf's tftpboot directory exists
1942- """
1943- if self.checked_dist in ["debian", "ubuntu"]:
1944- return
1945-
1946- bootloc = utils.tftpboot_location()
1947- if not os.path.exists(bootloc):
1948- status.append(_("please create directory: %(dirname)s") % { "dirname" : bootloc })
1949-
1950-
1951- def check_tftpd_conf(self,status):
1952- """
1953- Check that configured tftpd boot directory matches with actual
1954- Check that tftpd is enabled to autostart
1955- """
1956- if self.checked_dist in ["debian", "ubuntu"]:
1957- return
1958-
1959- if os.path.exists("/etc/xinetd.d/tftp"):
1960- f = open("/etc/xinetd.d/tftp")
1961- re_disable = re.compile(r'disable.*=.*yes')
1962- for line in f.readlines():
1963- if re_disable.search(line) and not line.strip().startswith("#"):
1964- status.append(_("change 'disable' to 'no' in %(file)s") % { "file" : "/etc/xinetd.d/tftp" })
1965- else:
1966- status.append("missing configuration file: /etc/xinetd.d/tftp")
1967-
1968- def check_ctftpd_bin(self,status):
1969- """
1970- Check if the Cobbler tftp server is installed
1971- """
1972- if self.checked_dist in ["debian", "ubuntu"]:
1973- return
1974-
1975- if not os.path.exists("/etc/xinetd.d/ctftp"):
1976- status.append("missing /etc/xinetd.d/ctftp")
1977-
1978- def check_ctftpd_dir(self,status):
1979- """
1980- Check if cobbler.conf's tftpboot directory exists
1981- """
1982- if self.checked_dist in ["debian", "ubuntu"]:
1983- return
1984-
1985- bootloc = utils.tftpboot_location()
1986- if not os.path.exists(bootloc):
1987- status.append(_("please create directory: %(dirname)s") % { "dirname" : bootloc })
1988-
1989- def check_ctftpd_conf(self,status):
1990- """
1991- Check that configured tftpd boot directory matches with actual
1992- Check that tftpd is enabled to autostart
1993- """
1994- if self.checked_dist in ["debian", "ubuntu"]:
1995- return
1996-
1997- if os.path.exists("/etc/xinetd.d/tftp"):
1998- f = open("/etc/xinetd.d/tftp")
1999- re_disable = re.compile(r'disable.*=.*no')
2000- for line in f.readlines():
2001- if re_disable.search(line) and not line.strip().startswith("#"):
2002- status.append(_("change 'disable' to 'yes' in %(file)s") % { "file" : "/etc/xinetd.d/tftp" })
2003- if os.path.exists("/etc/xinetd.d/ctftp"):
2004- f = open("/etc/xinetd.d/ctftp")
2005- re_disable = re.compile(r'disable.*=.*yes')
2006- for line in f.readlines():
2007- if re_disable.search(line) and not line.strip().startswith("#"):
2008- status.append(_("change 'disable' to 'no' in %(file)s") % { "file" : "/etc/xinetd.d/ctftp" })
2009- else:
2010- status.append("missing configuration file: /etc/xinetd.d/ctftp")
2011-
2012- def check_rsync_conf(self,status):
2013- """
2014- Check that rsync is enabled to autostart
2015- """
2016- if self.checked_dist in ["debian", "ubuntu"]:
2017- return
2018-
2019- if os.path.exists("/etc/xinetd.d/rsync"):
2020- f = open("/etc/xinetd.d/rsync")
2021- re_disable = re.compile(r'disable.*=.*yes')
2022- for line in f.readlines():
2023- if re_disable.search(line) and not line.strip().startswith("#"):
2024- status.append(_("change 'disable' to 'no' in %(file)s") % { "file" : "/etc/xinetd.d/rsync" })
2025- else:
2026- status.append(_("file %(file)s does not exist") % { "file" : "/etc/xinetd.d/rsync" })
2027-
2028-
2029- def check_dhcpd_conf(self,status):
2030- """
2031- NOTE: this code only applies if cobbler is *NOT* set to generate
2032- a dhcp.conf file
2033-
2034- Check that dhcpd *appears* to be configured for pxe booting.
2035- We can't assure file correctness. Since a cobbler user might
2036- have dhcp on another server, it's okay if it's not there and/or
2037- not configured correctly according to automated scans.
2038- """
2039- if not (self.settings.manage_dhcp == 0):
2040- return
2041-
2042- if os.path.exists(self.settings.dhcpd_conf):
2043- match_next = False
2044- match_file = False
2045- f = open(self.settings.dhcpd_conf)
2046- for line in f.readlines():
2047- if line.find("next-server") != -1:
2048- match_next = True
2049- if line.find("filename") != -1:
2050- match_file = True
2051- if not match_next:
2052- status.append(_("expecting next-server entry in %(file)s") % { "file" : self.settings.dhcpd_conf })
2053- if not match_file:
2054-                status.append(_("expecting filename entry in %(file)s") % { "file" : self.settings.dhcpd_conf })
2055- else:
2056- status.append(_("missing file: %(file)s") % { "file" : self.settings.dhcpd_conf })
2057-
2058
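The check_* methods above all follow one convention: each appends a human-readable warning string to the shared status list, and run() returns that list, so an empty result means every check passed. A hedged sketch of how a caller might consume it, assuming config and logger objects obtained from the cobbler API as elsewhere in this code base:

    def report_check_results(config, logger):
        # Run every 'cobbler check' test and log whatever problems were found.
        checker = BootCheck(config, logger)
        problems = checker.run() or []
        if not problems:
            logger.info("no configuration problems found")
        for msg in problems:
            logger.warning(msg)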
2059=== removed directory '.pc/40_ubuntu_bind9_management.patch/cobbler/modules'
2060=== removed file '.pc/40_ubuntu_bind9_management.patch/cobbler/modules/manage_bind.py'
2061--- .pc/40_ubuntu_bind9_management.patch/cobbler/modules/manage_bind.py 2011-04-18 11:15:59 +0000
2062+++ .pc/40_ubuntu_bind9_management.patch/cobbler/modules/manage_bind.py 1970-01-01 00:00:00 +0000
2063@@ -1,332 +0,0 @@
2064-"""
2065-This is some of the code behind 'cobbler sync'.
2066-
2067-Copyright 2006-2009, Red Hat, Inc
2068-Michael DeHaan <mdehaan@redhat.com>
2069-John Eckersberg <jeckersb@redhat.com>
2070-
2071-This program is free software; you can redistribute it and/or modify
2072-it under the terms of the GNU General Public License as published by
2073-the Free Software Foundation; either version 2 of the License, or
2074-(at your option) any later version.
2075-
2076-This program is distributed in the hope that it will be useful,
2077-but WITHOUT ANY WARRANTY; without even the implied warranty of
2078-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2079-GNU General Public License for more details.
2080-
2081-You should have received a copy of the GNU General Public License
2082-along with this program; if not, write to the Free Software
2083-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
2084-02110-1301 USA
2085-"""
2086-
2087-import os
2088-import os.path
2089-import shutil
2090-import time
2091-import sys
2092-import glob
2093-import traceback
2094-import errno
2095-import re
2096-from shlex import shlex
2097-
2098-
2099-import utils
2100-from cexceptions import *
2101-import templar
2102-
2103-import item_distro
2104-import item_profile
2105-import item_repo
2106-import item_system
2107-
2108-from utils import _
2109-
2110-
2111-def register():
2112- """
2113- The mandatory cobbler module registration hook.
2114- """
2115- return "manage"
2116-
2117-
2118-class BindManager:
2119-
2120- def what(self):
2121- return "bind"
2122-
2123- def __init__(self,config,logger):
2124- """
2125- Constructor
2126- """
2127- self.logger = logger
2128- self.config = config
2129- self.api = config.api
2130- self.distros = config.distros()
2131- self.profiles = config.profiles()
2132- self.systems = config.systems()
2133- self.settings = config.settings()
2134- self.repos = config.repos()
2135- self.templar = templar.Templar(config)
2136-
2137- def regen_hosts(self):
2138- pass # not used
2139-
2140- def __forward_zones(self):
2141- """
2142- Returns a map of zones and the records that belong
2143- in them
2144- """
2145- zones = {}
2146- forward_zones = self.settings.manage_forward_zones
2147- if type(forward_zones) != type([]):
2148- # gracefully handle when user inputs only a single zone
2149- # as a string instead of a list with only a single item
2150- forward_zones = [forward_zones]
2151-
2152- for zone in forward_zones:
2153- zones[zone] = {}
2154-
2155- for system in self.systems:
2156- for (name, interface) in system.interfaces.iteritems():
2157- host = interface["dns_name"]
2158- ip = interface["ip_address"]
2159- if not system.is_management_supported(cidr_ok=False):
2160- continue
2161- if not host or not ip:
2162- # gotsta have some dns_name and ip or else!
2163- continue
2164- if host.find(".") == -1:
2165- continue
2166-
2167- # match the longest zone!
2168- # e.g. if you have a host a.b.c.d.e
2169- # if manage_forward_zones has:
2170- # - c.d.e
2171- # - b.c.d.e
2172- # then a.b.c.d.e should go in b.c.d.e
2173- best_match = ''
2174- for zone in zones.keys():
2175- if re.search('\.%s$' % zone, host) and len(zone) > len(best_match):
2176- best_match = zone
2177-
2178- if best_match == '': # no match
2179- continue
2180-
2181- # strip the zone off the dns_name and append the
2182- # remainder + ip to the zone list
2183- host = re.sub('\.%s$' % best_match, '', host)
2184-
2185- zones[best_match][host] = ip
2186-
2187- return zones
2188-
2189- def __reverse_zones(self):
2190- """
2191- Returns a map of zones and the records that belong
2192- in them
2193- """
2194- zones = {}
2195- reverse_zones = self.settings.manage_reverse_zones
2196- if type(reverse_zones) != type([]):
2197- # gracefully handle when user inputs only a single zone
2198- # as a string instead of a list with only a single item
2199- reverse_zones = [reverse_zones]
2200-
2201- for zone in reverse_zones:
2202- zones[zone] = {}
2203-
2204- for sys in self.systems:
2205- for (name, interface) in sys.interfaces.iteritems():
2206- host = interface["dns_name"]
2207- ip = interface["ip_address"]
2208- if not sys.is_management_supported(cidr_ok=False):
2209- continue
2210- if not host or not ip:
2211- # gotsta have some dns_name and ip or else!
2212- continue
2213-
2214- # match the longest zone!
2215- # e.g. if you have an ip 1.2.3.4
2216- # if manage_reverse_zones has:
2217- # - 1.2
2218- # - 1.2.3
2219- # then 1.2.3.4 should go in 1.2.3
2220- best_match = ''
2221- for zone in zones.keys():
2222- if re.search('^%s\.' % zone, ip) and len(zone) > len(best_match):
2223- best_match = zone
2224-
2225- if best_match == '': # no match
2226- continue
2227-
2228- # strip the zone off the front of the ip
2229- # reverse the rest of the octets
2230- # append the remainder + dns_name
2231- ip = ip.replace(best_match, '', 1)
2232- if ip[0] == '.': # strip leading '.' if it's there
2233- ip = ip[1:]
2234- tokens = ip.split('.')
2235- tokens.reverse()
2236- ip = '.'.join(tokens)
2237- zones[best_match][ip] = host + '.'
2238-
2239- return zones
2240-
2241-
2242- def __write_named_conf(self):
2243- """
2244- Write out the named.conf main config file from the template.
2245- """
2246- settings_file = "/etc/named.conf"
2247- template_file = "/etc/cobbler/named.template"
2248- forward_zones = self.settings.manage_forward_zones
2249- reverse_zones = self.settings.manage_reverse_zones
2250-
2251- metadata = {'forward_zones': self.__forward_zones().keys(),
2252- 'reverse_zones': [],
2253- 'zone_include': ''}
2254-
2255- for zone in metadata['forward_zones']:
2256- txt = """
2257-zone "%(zone)s." {
2258- type master;
2259- file "%(zone)s";
2260-};
2261-""" % {'zone': zone}
2262- metadata['zone_include'] = metadata['zone_include'] + txt
2263-
2264- for zone in self.__reverse_zones().keys():
2265- tokens = zone.split('.')
2266- tokens.reverse()
2267- arpa = '.'.join(tokens) + '.in-addr.arpa'
2268- metadata['reverse_zones'].append((zone, arpa))
2269- txt = """
2270-zone "%(arpa)s." {
2271- type master;
2272- file "%(zone)s";
2273-};
2274-""" % {'arpa': arpa, 'zone': zone}
2275- metadata['zone_include'] = metadata['zone_include'] + txt
2276-
2277- try:
2278- f2 = open(template_file,"r")
2279- except:
2280- raise CX(_("error reading template from file: %s") % template_file)
2281- template_data = ""
2282- template_data = f2.read()
2283- f2.close()
2284-
2285- if self.logger is not None:
2286- self.logger.info("generating %s" % settings_file)
2287- self.templar.render(template_data, metadata, settings_file, None)
2288-
2289- def __ip_sort(self, ips):
2290- """
2291- Sorts IP addresses (or partial addresses) in a numerical fashion per-octet
2292- """
2293- # strings to integer octet chunks so we can sort numerically
2294- octets = map(lambda x: [int(i) for i in x.split('.')], ips)
2295- octets.sort()
2296- # integers back to strings
2297- octets = map(lambda x: [str(i) for i in x], octets)
2298- return ['.'.join(i) for i in octets]
2299-
2300- def __pretty_print_host_records(self, hosts, rectype='A', rclass='IN'):
2301- """
2302- Format host records by order and with consistent indentation
2303- """
2304- names = [k for k,v in hosts.iteritems()]
2305- if not names: return '' # zones with no hosts
2306-
2307- if rectype == 'PTR':
2308- names = self.__ip_sort(names)
2309- else:
2310- names.sort()
2311-
2312- max_name = max([len(i) for i in names])
2313-
2314- s = ""
2315- for name in names:
2316- spacing = " " * (max_name - len(name))
2317- my_name = "%s%s" % (name, spacing)
2318- my_host = hosts[name]
2319- s += "%s %s %s %s\n" % (my_name, rclass, rectype, my_host)
2320- return s
2321-
2322- def __write_zone_files(self):
2323- """
2324- Write out the forward and reverse zone files for all configured zones
2325- """
2326- default_template_file = "/etc/cobbler/zone.template"
2327- cobbler_server = self.settings.server
2328- serial = int(time.time())
2329- forward = self.__forward_zones()
2330- reverse = self.__reverse_zones()
2331-
2332- try:
2333- f2 = open(default_template_file,"r")
2334- except:
2335- raise CX(_("error reading template from file: %s") % default_template_file)
2336- default_template_data = ""
2337- default_template_data = f2.read()
2338- f2.close()
2339-
2340- for (zone, hosts) in forward.iteritems():
2341- metadata = {
2342- 'cobbler_server': cobbler_server,
2343- 'serial': serial,
2344- 'host_record': ''
2345- }
2346-
2347- # grab zone-specific template if it exists
2348- try:
2349- fd = open('/etc/cobbler/zone_templates/%s' % zone)
2350- template_data = fd.read()
2351- fd.close()
2352- except:
2353- template_data = default_template_data
2354-
2355- metadata['host_record'] = self.__pretty_print_host_records(hosts)
2356-
2357- zonefilename='/var/named/' + zone
2358- if self.logger is not None:
2359- self.logger.info("generating (forward) %s" % zonefilename)
2360- self.templar.render(template_data, metadata, zonefilename, None)
2361-
2362- for (zone, hosts) in reverse.iteritems():
2363- metadata = {
2364- 'cobbler_server': cobbler_server,
2365- 'serial': serial,
2366- 'host_record': ''
2367- }
2368-
2369- # grab zone-specific template if it exists
2370- try:
2371- fd = open('/etc/cobbler/zone_templates/%s' % zone)
2372- template_data = fd.read()
2373- fd.close()
2374- except:
2375- template_data = default_template_data
2376-
2377- metadata['host_record'] = self.__pretty_print_host_records(hosts, rectype='PTR')
2378-
2379- zonefilename='/var/named/' + zone
2380- if self.logger is not None:
2381- self.logger.info("generating (reverse) %s" % zonefilename)
2382- self.templar.render(template_data, metadata, zonefilename, None)
2383-
2384-
2385- def write_dns_files(self):
2386- """
2387- BIND files are written when manage_dns is set in
2388- /var/lib/cobbler/settings.
2389- """
2390-
2391- self.__write_named_conf()
2392- self.__write_zone_files()
2393-
2394-def get_manager(config,logger):
2395- return BindManager(config,logger)
2396
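A small self-contained sketch of the longest-suffix matching that __forward_zones() describes in its comments; re.escape() is added here so dots in zone names match literally (the code above interpolates the zone unescaped), and the function name is illustrative.

    import re

    def best_forward_zone(dns_name, zones):
        # Pick the longest configured zone that is a suffix of dns_name,
        # per the "match the longest zone" rule documented above.
        best_match = ''
        for zone in zones:
            if re.search(r'\.%s$' % re.escape(zone), dns_name) and len(zone) > len(best_match):
                best_match = zone
        return best_match or None

    # e.g. best_forward_zone("a.b.c.d.e", ["c.d.e", "b.c.d.e"]) -> "b.c.d.e",
    # so the record is filed under that zone with host part "a"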
2397=== removed file '.pc/40_ubuntu_bind9_management.patch/cobbler/modules/sync_post_restart_services.py'
2398--- .pc/40_ubuntu_bind9_management.patch/cobbler/modules/sync_post_restart_services.py 2011-04-18 11:15:59 +0000
2399+++ .pc/40_ubuntu_bind9_management.patch/cobbler/modules/sync_post_restart_services.py 1970-01-01 00:00:00 +0000
2400@@ -1,66 +0,0 @@
2401-import distutils.sysconfig
2402-import sys
2403-import os
2404-import traceback
2405-import cexceptions
2406-import os
2407-import sys
2408-import xmlrpclib
2409-import cobbler.module_loader as module_loader
2410-import cobbler.utils as utils
2411-
2412-plib = distutils.sysconfig.get_python_lib()
2413-mod_path="%s/cobbler" % plib
2414-sys.path.insert(0, mod_path)
2415-
2416-def register():
2417- # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.
2418- # the return of this method indicates the trigger type
2419- return "/var/lib/cobbler/triggers/sync/post/*"
2420-
2421-def run(api,args,logger):
2422-
2423- settings = api.settings()
2424-
2425- manage_dhcp = str(settings.manage_dhcp).lower()
2426- manage_dns = str(settings.manage_dns).lower()
2427- manage_tftpd = str(settings.manage_tftpd).lower()
2428- restart_dhcp = str(settings.restart_dhcp).lower()
2429- restart_dns = str(settings.restart_dns).lower()
2430-
2431- which_dhcp_module = module_loader.get_module_from_file("dhcp","module",just_name=True).strip()
2432- which_dns_module = module_loader.get_module_from_file("dns","module",just_name=True).strip()
2433-
2434- # special handling as we don't want to restart it twice
2435- has_restarted_dnsmasq = False
2436-
2437- rc = 0
2438- if manage_dhcp != "0":
2439- if which_dhcp_module == "manage_isc":
2440- if restart_dhcp != "0":
2441- rc = utils.subprocess_call(logger, "dhcpd -t -q", shell=True)
2442- if rc != 0:
2443- logger.error("dhcpd -t failed")
2444- return 1
2445- rc = utils.subprocess_call(logger,"service isc-dhcp-server restart", shell=True)
2446- elif which_dhcp_module == "manage_dnsmasq":
2447- if restart_dhcp != "0":
2448- rc = utils.subprocess_call(logger, "service dnsmasq restart")
2449- has_restarted_dnsmasq = True
2450- else:
2451- logger.error("unknown DHCP engine: %s" % which_dhcp_module)
2452- rc = 411
2453-
2454- if manage_dns != "0" and restart_dns != "0":
2455- if which_dns_module == "manage_bind":
2456- rc = utils.subprocess_call(logger, "service named restart", shell=True)
2457- elif which_dns_module == "manage_dnsmasq" and not has_restarted_dnsmasq:
2458- rc = utils.subprocess_call(logger, "service dnsmasq restart", shell=True)
2459- elif which_dns_module == "manage_dnsmasq" and has_restarted_dnsmasq:
2460- rc = 0
2461- else:
2462- logger.error("unknown DNS engine: %s" % which_dns_module)
2463- rc = 412
2464-
2465- return rc
2466-
2467
2468=== removed directory '.pc/40_ubuntu_bind9_management.patch/templates'
2469=== removed directory '.pc/40_ubuntu_bind9_management.patch/templates/etc'
2470=== removed file '.pc/40_ubuntu_bind9_management.patch/templates/etc/named.template'
2471--- .pc/40_ubuntu_bind9_management.patch/templates/etc/named.template 2011-04-18 11:15:59 +0000
2472+++ .pc/40_ubuntu_bind9_management.patch/templates/etc/named.template 1970-01-01 00:00:00 +0000
2473@@ -1,31 +0,0 @@
2474-options {
2475- listen-on port 53 { 127.0.0.1; };
2476- directory "/var/named";
2477- dump-file "/var/named/data/cache_dump.db";
2478- statistics-file "/var/named/data/named_stats.txt";
2479- memstatistics-file "/var/named/data/named_mem_stats.txt";
2480- allow-query { localhost; };
2481- recursion yes;
2482-};
2483-
2484-logging {
2485- channel default_debug {
2486- file "data/named.run";
2487- severity dynamic;
2488- };
2489-};
2490-
2491-#for $zone in $forward_zones
2492-zone "${zone}." {
2493- type master;
2494- file "$zone";
2495-};
2496-
2497-#end for
2498-#for $zone, $arpa in $reverse_zones
2499-zone "${arpa}." {
2500- type master;
2501- file "$zone";
2502-};
2503-
2504-#end for
2505
2506=== removed directory '.pc/41_update_tree_path_with_arch.patch'
2507=== removed directory '.pc/41_update_tree_path_with_arch.patch/cobbler'
2508=== removed directory '.pc/41_update_tree_path_with_arch.patch/cobbler/modules'
2509=== removed file '.pc/41_update_tree_path_with_arch.patch/cobbler/modules/manage_import_debian_ubuntu.py'
2510--- .pc/41_update_tree_path_with_arch.patch/cobbler/modules/manage_import_debian_ubuntu.py 2011-05-02 18:26:03 +0000
2511+++ .pc/41_update_tree_path_with_arch.patch/cobbler/modules/manage_import_debian_ubuntu.py 1970-01-01 00:00:00 +0000
2512@@ -1,777 +0,0 @@
2513-"""
2514-This is some of the code behind 'cobbler sync'.
2515-
2516-Copyright 2006-2009, Red Hat, Inc
2517-Michael DeHaan <mdehaan@redhat.com>
2518-John Eckersberg <jeckersb@redhat.com>
2519-
2520-This program is free software; you can redistribute it and/or modify
2521-it under the terms of the GNU General Public License as published by
2522-the Free Software Foundation; either version 2 of the License, or
2523-(at your option) any later version.
2524-
2525-This program is distributed in the hope that it will be useful,
2526-but WITHOUT ANY WARRANTY; without even the implied warranty of
2527-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2528-GNU General Public License for more details.
2529-
2530-You should have received a copy of the GNU General Public License
2531-along with this program; if not, write to the Free Software
2532-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
2533-02110-1301 USA
2534-"""
2535-
2536-import os
2537-import os.path
2538-import shutil
2539-import time
2540-import sys
2541-import glob
2542-import traceback
2543-import errno
2544-import re
2545-from utils import popen2
2546-from shlex import shlex
2547-
2548-
2549-import utils
2550-from cexceptions import *
2551-import templar
2552-
2553-import item_distro
2554-import item_profile
2555-import item_repo
2556-import item_system
2557-
2558-from utils import _
2559-
2560-def register():
2561- """
2562- The mandatory cobbler module registration hook.
2563- """
2564- return "manage/import"
2565-
2566-
2567-class ImportDebianUbuntuManager:
2568-
2569- def __init__(self,config,logger):
2570- """
2571- Constructor
2572- """
2573- self.logger = logger
2574- self.config = config
2575- self.api = config.api
2576- self.distros = config.distros()
2577- self.profiles = config.profiles()
2578- self.systems = config.systems()
2579- self.settings = config.settings()
2580- self.repos = config.repos()
2581- self.templar = templar.Templar(config)
2582-
2583- # required function for import modules
2584- def what(self):
2585- return "import/debian_ubuntu"
2586-
2587- # required function for import modules
2588- def check_for_signature(self,path,cli_breed):
2589- signatures = [
2590- 'pool',
2591- ]
2592-
2593- #self.logger.info("scanning %s for a debian/ubuntu distro signature" % path)
2594- for signature in signatures:
2595- d = os.path.join(path,signature)
2596- if os.path.exists(d):
2597- self.logger.info("Found a debian/ubuntu compatible signature: %s" % signature)
2598- return (True,signature)
2599-
2600- if cli_breed and cli_breed in self.get_valid_breeds():
2601- self.logger.info("Warning: No distro signature for kernel at %s, using value from command line" % path)
2602- return (True,None)
2603-
2604- return (False,None)
2605-
2606- # required function for import modules
2607- def run(self,pkgdir,mirror,mirror_name,network_root=None,kickstart_file=None,rsync_flags=None,arch=None,breed=None,os_version=None):
2608- self.pkgdir = pkgdir
2609- self.mirror = mirror
2610- self.mirror_name = mirror_name
2611- self.network_root = network_root
2612- self.kickstart_file = kickstart_file
2613- self.rsync_flags = rsync_flags
2614- self.arch = arch
2615- self.breed = breed
2616- self.os_version = os_version
2617-
2618- # some fixups for the XMLRPC interface, which does not use "None"
2619- if self.arch == "": self.arch = None
2620- if self.mirror == "": self.mirror = None
2621- if self.mirror_name == "": self.mirror_name = None
2622- if self.kickstart_file == "": self.kickstart_file = None
2623- if self.os_version == "": self.os_version = None
2624- if self.rsync_flags == "": self.rsync_flags = None
2625- if self.network_root == "": self.network_root = None
2626-
2627- # If no breed was specified on the command line, figure it out
2628- if self.breed == None:
2629- self.breed = self.get_breed_from_directory()
2630- if not self.breed:
2631- utils.die(self.logger,"import failed - could not determine breed of debian-based distro")
2632-
2633- # debug log stuff for testing
2634- #self.logger.info("DEBUG: self.pkgdir = %s" % str(self.pkgdir))
2635- #self.logger.info("DEBUG: self.mirror = %s" % str(self.mirror))
2636- #self.logger.info("DEBUG: self.mirror_name = %s" % str(self.mirror_name))
2637- #self.logger.info("DEBUG: self.network_root = %s" % str(self.network_root))
2638- #self.logger.info("DEBUG: self.kickstart_file = %s" % str(self.kickstart_file))
2639- #self.logger.info("DEBUG: self.rsync_flags = %s" % str(self.rsync_flags))
2640- #self.logger.info("DEBUG: self.arch = %s" % str(self.arch))
2641- #self.logger.info("DEBUG: self.breed = %s" % str(self.breed))
2642- #self.logger.info("DEBUG: self.os_version = %s" % str(self.os_version))
2643-
2644- # both --import and --name are required arguments
2645-
2646- if self.mirror is None:
2647- utils.die(self.logger,"import failed. no --path specified")
2648- if self.mirror_name is None:
2649- utils.die(self.logger,"import failed. no --name specified")
2650-
2651- # if --arch is supplied, validate it to ensure it's valid
2652-
2653- if self.arch is not None and self.arch != "":
2654- self.arch = self.arch.lower()
2655- if self.arch == "x86":
2656- # be consistent
2657- self.arch = "i386"
2658- if self.arch not in self.get_valid_arches():
2659-                utils.die(self.logger,"arch must be one of: %s" % ", ".join(self.get_valid_arches()))
2660-
2661- # if we're going to do any copying, set where to put things
2662- # and then make sure nothing is already there.
2663-
2664- self.path = os.path.normpath( "%s/ks_mirror/%s" % (self.settings.webdir, self.mirror_name) )
2665- if os.path.exists(self.path) and self.arch is None:
2666- # FIXME : Raise exception even when network_root is given ?
2667- utils.die(self.logger,"Something already exists at this import location (%s). You must specify --arch to avoid potentially overwriting existing files." % self.path)
2668-
2669- # import takes a --kickstart for forcing selection that can't be used in all circumstances
2670-
2671- if self.kickstart_file and not self.breed:
2672- utils.die(self.logger,"Kickstart file can only be specified when a specific breed is selected")
2673-
2674- if self.os_version and not self.breed:
2675- utils.die(self.logger,"OS version can only be specified when a specific breed is selected")
2676-
2677- if self.breed and self.breed.lower() not in self.get_valid_breeds():
2678- utils.die(self.logger,"Supplied import breed is not supported by this module")
2679-
2680- # if --arch is supplied, make sure the user is not importing a path with a different
2681- # arch, which would just be silly.
2682-
2683- if self.arch:
2684- # append the arch path to the name if the arch is not already
2685- # found in the name.
2686- for x in self.get_valid_arches():
2687- if self.path.lower().find(x) != -1:
2688- if self.arch != x :
2689- utils.die(self.logger,"Architecture found on pathname (%s) does not fit the one given in command line (%s)"%(x,self.arch))
2690- break
2691- else:
2692- # FIXME : This is very likely removed later at get_proposed_name, and the guessed arch appended again
2693- self.path += ("-%s" % self.arch)
2694-
2695- # make the output path and mirror content but only if not specifying that a network
2696- # accessible support location already exists (this is --available-as on the command line)
2697-
2698- if self.network_root is None:
2699- # we need to mirror (copy) the files
2700-
2701- utils.mkdir(self.path)
2702-
2703- if self.mirror.startswith("http://") or self.mirror.startswith("ftp://") or self.mirror.startswith("nfs://"):
2704-
2705-                # http mirrors are kind of primitive. rsync is better.
2706- # that's why this isn't documented in the manpage and we don't support them.
2707- # TODO: how about adding recursive FTP as an option?
2708-
2709- utils.die(self.logger,"unsupported protocol")
2710-
2711- else:
2712-
2713- # good, we're going to use rsync..
2714- # we don't use SSH for public mirrors and local files.
2715- # presence of user@host syntax means use SSH
2716-
2717- # kick off the rsync now
2718-
2719- if not utils.rsync_files(self.mirror, self.path, self.rsync_flags, self.logger):
2720- utils.die(self.logger, "failed to rsync the files")
2721-
2722- else:
2723-
2724- # rather than mirroring, we're going to assume the path is available
2725- # over http, ftp, and nfs, perhaps on an external filer. scanning still requires
2726- # --mirror is a filesystem path, but --available-as marks the network path
2727-
2728- if not os.path.exists(self.mirror):
2729- utils.die(self.logger, "path does not exist: %s" % self.mirror)
2730-
2731- # find the filesystem part of the path, after the server bits, as each distro
2732- # URL needs to be calculated relative to this.
2733-
2734- if not self.network_root.endswith("/"):
2735- self.network_root = self.network_root + "/"
2736- self.path = os.path.normpath( self.mirror )
2737- valid_roots = [ "nfs://", "ftp://", "http://" ]
2738- for valid_root in valid_roots:
2739- if self.network_root.startswith(valid_root):
2740- break
2741- else:
2742- utils.die(self.logger, "Network root given to --available-as must be nfs://, ftp://, or http://")
2743- if self.network_root.startswith("nfs://"):
2744- try:
2745- (a,b,rest) = self.network_root.split(":",3)
2746- except:
2747- utils.die(self.logger, "Network root given to --available-as is missing a colon, please see the manpage example.")
2748-
2749- # now walk the filesystem looking for distributions that match certain patterns
2750-
2751- self.logger.info("adding distros")
2752- distros_added = []
2753- # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
2754- os.path.walk(self.path, self.distro_adder, distros_added)
2755-
2756- # find out if we can auto-create any repository records from the install tree
2757-
2758- if self.network_root is None:
2759- self.logger.info("associating repos")
2760- # FIXME: this automagic is not possible (yet) without mirroring
2761- self.repo_finder(distros_added)
2762-
2763- # find the most appropriate answer files for each profile object
2764-
2765- self.logger.info("associating kickstarts")
2766- self.kickstart_finder(distros_added)
2767-
2768- # ensure bootloaders are present
2769- self.api.pxegen.copy_bootloaders()
2770-
2771- return True
2772-
2773- # required function for import modules
2774- def get_valid_arches(self):
2775- return ["i386", "ppc", "x86_64", "x86",]
2776-
2777- # required function for import modules
2778- def get_valid_breeds(self):
2779- return ["debian","ubuntu"]
2780-
2781- # required function for import modules
2782- def get_valid_os_versions(self):
2783- if self.breed == "debian":
2784- return ["etch", "lenny", "squeeze", "sid", "stable", "testing", "unstable", "experimental",]
2785- elif self.breed == "ubuntu":
2786- return ["dapper", "hardy", "karmic", "lucid", "maverick", "natty",]
2787- else:
2788- return []
2789-
2790- def get_valid_repo_breeds(self):
2791- return ["apt",]
2792-
2793- def get_release_files(self):
2794- """
2795- Find distro release packages.
2796- """
2797- return glob.glob(os.path.join(self.get_rootdir(), "dists/*"))
2798-
2799- def get_breed_from_directory(self):
2800- for breed in self.get_valid_breeds():
2801- # NOTE : Although we break the loop after the first match,
2802- # multiple debian derived distros can actually live at the same pool -- JP
2803- d = os.path.join(self.mirror, breed)
2804- if (os.path.islink(d) and os.path.isdir(d) and os.path.realpath(d) == os.path.realpath(self.mirror)) or os.path.basename(self.mirror) == breed:
2805- return breed
2806- else:
2807- return None
2808-
2809- def get_tree_location(self, distro):
2810- """
2811- Once a distribution is identified, find the part of the distribution
2812- that has the URL in it that we want to use for kickstarting the
2813- distribution, and create a ksmeta variable $tree that contains this.
2814- """
2815-
2816- base = self.get_rootdir()
2817-
2818- if self.network_root is None:
2819- dists_path = os.path.join(self.path, "dists")
2820- if os.path.isdir(dists_path):
2821- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
2822- else:
2823- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
2824- self.set_install_tree(distro, tree)
2825- else:
2826- # where we assign the kickstart source is relative to our current directory
2827- # and the input start directory in the crawl. We find the path segments
2828- # between and tack them on the network source path to find the explicit
2829- # network path to the distro that Anaconda can digest.
2830- tail = self.path_tail(self.path, base)
2831- tree = self.network_root[:-1] + tail
2832- self.set_install_tree(distro, tree)
2833-
2834- return
2835-
2836- def repo_finder(self, distros_added):
2837- for distro in distros_added:
2838- self.logger.info("traversing distro %s" % distro.name)
2839-            # FIXME : Shouldn't the value of self.network_root decide this ?
2840- if distro.kernel.find("ks_mirror") != -1:
2841- basepath = os.path.dirname(distro.kernel)
2842- top = self.get_rootdir()
2843- self.logger.info("descent into %s" % top)
2844- dists_path = os.path.join(self.path, "dists")
2845- if not os.path.isdir(dists_path):
2846- self.process_repos()
2847- else:
2848- self.logger.info("this distro isn't mirrored")
2849-
2850- def process_repos(self):
2851- pass
2852-
2853- def distro_adder(self,distros_added,dirname,fnames):
2854- """
2855- This is an os.path.walk routine that finds distributions in the directory
2856- to be scanned and then creates them.
2857- """
2858-
2859-        # FIXME: If there is more than one kernel or initrd image in the same directory,
2860-        # the results are unpredictable
2861-
2862- initrd = None
2863- kernel = None
2864-
2865- for x in fnames:
2866- adtls = []
2867-
2868- fullname = os.path.join(dirname,x)
2869- if os.path.islink(fullname) and os.path.isdir(fullname):
2870- if fullname.startswith(self.path):
2871- self.logger.warning("avoiding symlink loop")
2872- continue
2873- self.logger.info("following symlink: %s" % fullname)
2874- os.path.walk(fullname, self.distro_adder, distros_added)
2875-
2876- if ( x.startswith("initrd.gz") ) and x != "initrd.size":
2877- initrd = os.path.join(dirname,x)
2878- if ( x.startswith("linux") ) and x.find("initrd") == -1:
2879- kernel = os.path.join(dirname,x)
2880-
2881-            # if we've collected a matching kernel and initrd pair, turn them into distro entries and add them to the list
2882- if initrd is not None and kernel is not None:
2883- adtls.append(self.add_entry(dirname,kernel,initrd))
2884- kernel = None
2885- initrd = None
2886-
2887- for adtl in adtls:
2888- distros_added.extend(adtl)
2889-
2890- def add_entry(self,dirname,kernel,initrd):
2891- """
2892- When we find a directory with a valid kernel/initrd in it, create the distribution objects
2893- as appropriate and save them. This includes creating xen and rescue distros/profiles
2894- if possible.
2895- """
2896-
2897- proposed_name = self.get_proposed_name(dirname,kernel)
2898- proposed_arch = self.get_proposed_arch(dirname)
2899-
2900- if self.arch and proposed_arch and self.arch != proposed_arch:
2901- utils.die(self.logger,"Arch from pathname (%s) does not match with supplied one %s"%(proposed_arch,self.arch))
2902-
2903- archs = self.learn_arch_from_tree()
2904- if not archs:
2905- if self.arch:
2906- archs.append( self.arch )
2907- else:
2908- if self.arch and self.arch not in archs:
2909- utils.die(self.logger, "Given arch (%s) not found on imported tree %s"%(self.arch,self.get_pkgdir()))
2910- if proposed_arch:
2911- if archs and proposed_arch not in archs:
2912- self.logger.warning("arch from pathname (%s) not found on imported tree %s" % (proposed_arch,self.get_pkgdir()))
2913- return
2914-
2915- archs = [ proposed_arch ]
2916-
2917- if len(archs)>1:
2918- self.logger.warning("- Warning : Multiple archs found : %s" % (archs))
2919-
2920- distros_added = []
2921-
2922- for pxe_arch in archs:
2923- name = proposed_name + "-" + pxe_arch
2924- existing_distro = self.distros.find(name=name)
2925-
2926- if existing_distro is not None:
2927- self.logger.warning("skipping import, as distro name already exists: %s" % name)
2928- continue
2929-
2930- else:
2931- self.logger.info("creating new distro: %s" % name)
2932- distro = self.config.new_distro()
2933-
2934- if name.find("-autoboot") != -1:
2935- # this is an artifact of some EL-3 imports
2936- continue
2937-
2938- distro.set_name(name)
2939- distro.set_kernel(kernel)
2940- distro.set_initrd(initrd)
2941- distro.set_arch(pxe_arch)
2942- distro.set_breed(self.breed)
2943- # If a version was supplied on command line, we set it now
2944- if self.os_version:
2945- distro.set_os_version(self.os_version)
2946-
2947- self.distros.add(distro,save=True)
2948- distros_added.append(distro)
2949-
2950- existing_profile = self.profiles.find(name=name)
2951-
2952- # see if the profile name is already used, if so, skip it and
2953- # do not modify the existing profile
2954-
2955- if existing_profile is None:
2956- self.logger.info("creating new profile: %s" % name)
2957- #FIXME: The created profile holds a default kickstart, and should be breed specific
2958- profile = self.config.new_profile()
2959- else:
2960- self.logger.info("skipping existing profile, name already exists: %s" % name)
2961- continue
2962-
2963- # save our minimal profile which just points to the distribution and a good
2964- # default answer file
2965-
2966- profile.set_name(name)
2967- profile.set_distro(name)
2968- profile.set_kickstart(self.kickstart_file)
2969-
2970- # depending on the name of the profile we can define a good virt-type
2971- # for usage with koan
2972-
2973- if name.find("-xen") != -1:
2974- profile.set_virt_type("xenpv")
2975- elif name.find("vmware") != -1:
2976- profile.set_virt_type("vmware")
2977- else:
2978- profile.set_virt_type("qemu")
2979-
2980- # save our new profile to the collection
2981-
2982- self.profiles.add(profile,save=True)
2983-
2984- return distros_added
2985-
2986- def get_proposed_name(self,dirname,kernel=None):
2987- """
2988- Given a directory name where we have a kernel/initrd pair, try to autoname
2989- the distribution (and profile) object based on the contents of that path
2990- """
2991-
2992- if self.network_root is not None:
2993- name = self.mirror_name + "-".join(self.path_tail(os.path.dirname(self.path),dirname).split("/"))
2994- else:
2995- # remove the part that says /var/www/cobbler/ks_mirror/name
2996- name = "-".join(dirname.split("/")[5:])
2997-
2998- if kernel is not None and kernel.find("PAE") != -1:
2999- name = name + "-PAE"
3000-
3001- # These are all Ubuntu's doing, the netboot images are buried pretty
3002- # deep. ;-) -JC
3003- name = name.replace("-netboot","")
3004- name = name.replace("-ubuntu-installer","")
3005- name = name.replace("-amd64","")
3006- name = name.replace("-i386","")
3007-
3008- # we know that some kernel paths should not be in the name
3009-
3010- name = name.replace("-images","")
3011- name = name.replace("-pxeboot","")
3012- name = name.replace("-install","")
3013- name = name.replace("-isolinux","")
3014-
3015- # some paths above the media root may have extra path segments we want
3016- # to clean up
3017-
3018- name = name.replace("-os","")
3019- name = name.replace("-tree","")
3020- name = name.replace("var-www-cobbler-", "")
3021- name = name.replace("ks_mirror-","")
3022- name = name.replace("--","-")
3023-
3024-        # remove any architecture-related name strings, as the real arch will be appended later
3025-
3026- name = name.replace("chrp","ppc64")
3027-
3028- for separator in [ '-' , '_' , '.' ] :
3029- for arch in [ "i386" , "x86_64" , "ia64" , "ppc64", "ppc32", "ppc", "x86" , "s390x", "s390" , "386" , "amd" ]:
3030- name = name.replace("%s%s" % ( separator , arch ),"")
3031-
3032- return name
3033-
3034- def get_proposed_arch(self,dirname):
3035- """
3036-        Given a directory name, can we infer an architecture from a path segment?
3037- """
3038- if dirname.find("x86_64") != -1 or dirname.find("amd") != -1:
3039- return "x86_64"
3040- if dirname.find("ia64") != -1:
3041- return "ia64"
3042- if dirname.find("i386") != -1 or dirname.find("386") != -1 or dirname.find("x86") != -1:
3043- return "i386"
3044- if dirname.find("s390x") != -1:
3045- return "s390x"
3046- if dirname.find("s390") != -1:
3047- return "s390"
3048- if dirname.find("ppc64") != -1 or dirname.find("chrp") != -1:
3049- return "ppc64"
3050- if dirname.find("ppc32") != -1:
3051- return "ppc"
3052- if dirname.find("ppc") != -1:
3053- return "ppc"
3054- return None
3055-
3056- def arch_walker(self,foo,dirname,fnames):
3057- """
3058- See docs on learn_arch_from_tree.
3059-
3060- The TRY_LIST is used to speed up search, and should be dropped for default importer
3061- Searched kernel names are kernel-header, linux-headers-, kernel-largesmp, kernel-hugemem
3062-
3063-        This method is useful for getting the archs, but also the package type and a rough guess of the breed
3064- """
3065-
3066-        # try to find a kernel headers package and then look at its arch.
3067- for x in fnames:
3068- if self.match_kernelarch_file(x):
3069- for arch in self.get_valid_arches():
3070- if x.find(arch) != -1:
3071- foo[arch] = 1
3072- for arch in [ "i686" , "amd64" ]:
3073- if x.find(arch) != -1:
3074- foo[arch] = 1
3075-
3076- def kickstart_finder(self,distros_added):
3077- """
3078-        For all of the profiles in the config without a kickstart, use the
3079-        given kickstart file, or look at the kernel path; from that,
3080-        see if we can guess the distro and, if we can, assign a kickstart
3081-        if one is available for it.
3082- """
3083- for profile in self.profiles:
3084- distro = self.distros.find(name=profile.get_conceptual_parent().name)
3085- if distro is None or not (distro in distros_added):
3086- continue
3087-
3088- kdir = os.path.dirname(distro.kernel)
3089- if self.kickstart_file == None:
3090- for file in self.get_release_files():
3091- results = self.scan_pkg_filename(file)
3092- # FIXME : If os is not found on tree but set with CLI, no kickstart is searched
3093- if results is None:
3094- self.logger.warning("skipping %s" % file)
3095- continue
3096- (flavor, major, minor, release) = results
3097- # Why use set_variance()? scan_pkg_filename() does everything we need now - jcammarata
3098- #version , ks = self.set_variance(flavor, major, minor, distro.arch)
3099- if self.os_version:
3100- if self.os_version != flavor:
3101- utils.die(self.logger,"CLI version differs from tree : %s vs. %s" % (self.os_version,flavor))
3102- distro.set_comment("%s %s (%s.%s.%s) %s" % (self.breed,flavor,major,minor,release,self.arch))
3103- distro.set_os_version(flavor)
3104- # is this even valid for debian/ubuntu? - jcammarata
3105- #ds = self.get_datestamp()
3106- #if ds is not None:
3107- # distro.set_tree_build_time(ds)
3108- profile.set_kickstart("/var/lib/cobbler/kickstarts/sample.seed")
3109- self.profiles.add(profile,save=True)
3110-
3111- self.configure_tree_location(distro)
3112- self.distros.add(distro,save=True) # re-save
3113- self.api.serialize()
3114-
3115- def configure_tree_location(self, distro):
3116- """
3117- Once a distribution is identified, find the part of the distribution
3118- that has the URL in it that we want to use for kickstarting the
3119- distribution, and create a ksmeta variable $tree that contains this.
3120- """
3121-
3122- base = self.get_rootdir()
3123-
3124- if self.network_root is None:
3125- dists_path = os.path.join( self.path , "dists" )
3126- if os.path.isdir( dists_path ):
3127- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
3128- else:
3129- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
3130- self.set_install_tree(distro, tree)
3131- else:
3132- # where we assign the kickstart source is relative to our current directory
3133- # and the input start directory in the crawl. We find the path segments
3134- # between and tack them on the network source path to find the explicit
3135- # network path to the distro that Anaconda can digest.
3136- tail = utils.path_tail(self.path, base)
3137- tree = self.network_root[:-1] + tail
3138- self.set_install_tree(distro, tree)
3139-
3140- def get_rootdir(self):
3141- return self.mirror
3142-
3143- def get_pkgdir(self):
3144- if not self.pkgdir:
3145- return None
3146- return os.path.join(self.get_rootdir(),self.pkgdir)
3147-
3148- def set_install_tree(self, distro, url):
3149- distro.ks_meta["tree"] = url
3150-
3151- def learn_arch_from_tree(self):
3152- """
3153- If a distribution is imported from DVD, there is a good chance the path doesn't
3154- contain the arch and we should add it back in so that it's part of the
3155- meaningful name ... so this code helps figure out the arch name. This is important
3156- for producing predictable distro names (and profile names) from differing import sources
3157- """
3158- result = {}
3159- # FIXME : this is called only once, should not be a walk
3160- if self.get_pkgdir():
3161- os.path.walk(self.get_pkgdir(), self.arch_walker, result)
3162- if result.pop("amd64",False):
3163- result["x86_64"] = 1
3164- if result.pop("i686",False):
3165- result["i386"] = 1
3166- return result.keys()
3167-
3168- def match_kernelarch_file(self, filename):
3169- """
3170- Is the given filename a kernel filename?
3171- """
3172- if not filename.endswith("deb"):
3173- return False
3174- if filename.startswith("linux-headers-"):
3175- return True
3176- return False
3177-
3178- def scan_pkg_filename(self, file):
3179- """
3180- Determine what the distro is based on the release package filename.
3181- """
3182- # FIXME: all of these dist_names should probably be put in a function
3183- # which would be called in place of looking in codes.py. Right now
3184- # you have to update both codes.py and this to add a new release
3185- if self.breed == "debian":
3186- dist_names = ['etch','lenny',]
3187- elif self.breed == "ubuntu":
3188- dist_names = ['dapper','hardy','intrepid','jaunty','karmic','lynx','maverick','natty',]
3189- else:
3190- return None
3191-
3192- if os.path.basename(file) in dist_names:
3193- release_file = os.path.join(file,'Release')
3194- self.logger.info("Found %s release file: %s" % (self.breed,release_file))
3195-
3196- f = open(release_file,'r')
3197- lines = f.readlines()
3198- f.close()
3199-
3200- for line in lines:
3201- if line.lower().startswith('version: '):
3202- version = line.split(':')[1].strip()
3203- values = version.split('.')
3204- if len(values) == 1:
3205- # I don't think you'd ever hit this currently with debian or ubuntu,
3206- # just including it for safety reasons
3207- return (os.path.basename(file), values[0], "0", "0")
3208- elif len(values) == 2:
3209- return (os.path.basename(file), values[0], values[1], "0")
3210- elif len(values) > 2:
3211- return (os.path.basename(file), values[0], values[1], values[2])
3212- return None
3213-
3214- def get_datestamp(self):
3215- """
3216- Not used for debian/ubuntu... should probably be removed? - jcammarata
3217- """
3218- pass
3219-
3220- def set_variance(self, flavor, major, minor, arch):
3221- """
3222- Set distro specific versioning.
3223- """
3224- # I don't think this is required anymore, as the scan_pkg_filename() function
3225- # above does everything we need it to - jcammarata
3226- #
3227- #if self.breed == "debian":
3228- # dist_names = { '4.0' : "etch" , '5.0' : "lenny" }
3229- # dist_vers = "%s.%s" % ( major , minor )
3230- # os_version = dist_names[dist_vers]
3231- #
3232- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
3233- #elif self.breed == "ubuntu":
3234- # # Release names taken from wikipedia
3235- # dist_names = { '6.4' :"dapper",
3236- # '8.4' :"hardy",
3237- # '8.10' :"intrepid",
3238- # '9.4' :"jaunty",
3239- # '9.10' :"karmic",
3240- # '10.4' :"lynx",
3241- # '10.10':"maverick",
3242- # '11.4' :"natty",
3243- # }
3244- # dist_vers = "%s.%s" % ( major , minor )
3245- # if not dist_names.has_key( dist_vers ):
3246- # dist_names['4ubuntu2.0'] = "IntrepidIbex"
3247- # os_version = dist_names[dist_vers]
3248- #
3249- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
3250- #else:
3251- # return None
3252- pass
3253-
3254- def process_repos(self, main_importer, distro):
3255- # Create a disabled repository for the new distro, and the security updates
3256- #
3257- # NOTE : We cannot use ks_meta nor os_version because they get fixed at a later stage
3258-
3259- repo = item_repo.Repo(main_importer.config)
3260- repo.set_breed( "apt" )
3261- repo.set_arch( distro.arch )
3262- repo.set_keep_updated( False )
3263- repo.yumopts["--ignore-release-gpg"] = None
3264- repo.yumopts["--verbose"] = None
3265- repo.set_name( distro.name )
3266- repo.set_os_version( distro.os_version )
3267- # NOTE : The location of the mirror should come from timezone
3268- repo.set_mirror( "http://ftp.%s.debian.org/debian/dists/%s" % ( 'us' , '@@suite@@' ) )
3269-
3270- security_repo = item_repo.Repo(main_importer.config)
3271- security_repo.set_breed( "apt" )
3272- security_repo.set_arch( distro.arch )
3273- security_repo.set_keep_updated( False )
3274- security_repo.yumopts["--ignore-release-gpg"] = None
3275- security_repo.yumopts["--verbose"] = None
3276- security_repo.set_name( distro.name + "-security" )
3277- security_repo.set_os_version( distro.os_version )
3278- # There are no official mirrors for security updates
3279- security_repo.set_mirror( "http://security.debian.org/debian-security/dists/%s/updates" % '@@suite@@' )
3280-
3281- self.logger.info("Added repos for %s" % distro.name)
3282- repos = main_importer.config.repos()
3283- repos.add(repo,save=True)
3284- repos.add(security_repo,save=True)
3285-
3286-# ==========================================================================
3287-
3288-def get_import_manager(config,logger):
3289- return ImportDebianUbuntuManager(config,logger)
3290
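
The importer above ultimately keys everything off scan_pkg_filename(): it looks for a suite directory named after a known release (e.g. 'lenny' or 'maverick'), reads that suite's Release file, and splits its 'Version:' field into a (flavor, major, minor, release) tuple that later sets the distro comment and os_version. A minimal standalone sketch of just that parsing step (the helper name and sample input are illustrative, not taken from the code above):

    def parse_release_version(flavor, version_line):
        # version_line is the "Version: ..." line from a suite's Release file,
        # e.g. "Version: 10.10"
        version = version_line.split(':', 1)[1].strip()
        values = version.split('.')
        values += ['0'] * (3 - len(values))   # pad, mirroring the len()==1/len()==2 branches above
        return (flavor, values[0], values[1], values[2])

    # parse_release_version('maverick', 'Version: 10.10') -> ('maverick', '10', '10', '0')
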
3291=== removed directory '.pc/42_fix_repomirror_create_sync.patch'
3292=== removed directory '.pc/42_fix_repomirror_create_sync.patch/cobbler'
3293=== removed file '.pc/42_fix_repomirror_create_sync.patch/cobbler/action_reposync.py'
3294--- .pc/42_fix_repomirror_create_sync.patch/cobbler/action_reposync.py 2011-06-08 17:21:45 +0000
3295+++ .pc/42_fix_repomirror_create_sync.patch/cobbler/action_reposync.py 1970-01-01 00:00:00 +0000
3296@@ -1,568 +0,0 @@
3297-"""
3298-Builds out and synchronizes yum repo mirrors.
3299-Initial support for rsync, perhaps reposync coming later.
3300-
3301-Copyright 2006-2007, Red Hat, Inc
3302-Michael DeHaan <mdehaan@redhat.com>
3303-
3304-This program is free software; you can redistribute it and/or modify
3305-it under the terms of the GNU General Public License as published by
3306-the Free Software Foundation; either version 2 of the License, or
3307-(at your option) any later version.
3308-
3309-This program is distributed in the hope that it will be useful,
3310-but WITHOUT ANY WARRANTY; without even the implied warranty of
3311-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3312-GNU General Public License for more details.
3313-
3314-You should have received a copy of the GNU General Public License
3315-along with this program; if not, write to the Free Software
3316-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
3317-02110-1301 USA
3318-"""
3319-
3320-import os
3321-import os.path
3322-import time
3323-import yaml # Howell-Clark version
3324-import sys
3325-HAS_YUM = True
3326-try:
3327- import yum
3328-except:
3329- HAS_YUM = False
3330-
3331-import utils
3332-from cexceptions import *
3333-import traceback
3334-import errno
3335-from utils import _
3336-import clogger
3337-
3338-class RepoSync:
3339- """
3340- Handles conversion of internal state to the tftpboot tree layout
3341- """
3342-
3343- # ==================================================================================
3344-
3345- def __init__(self,config,tries=1,nofail=False,logger=None):
3346- """
3347- Constructor
3348- """
3349- self.verbose = True
3350- self.api = config.api
3351- self.config = config
3352- self.distros = config.distros()
3353- self.profiles = config.profiles()
3354- self.systems = config.systems()
3355- self.settings = config.settings()
3356- self.repos = config.repos()
3357- self.rflags = self.settings.reposync_flags
3358- self.tries = tries
3359- self.nofail = nofail
3360- self.logger = logger
3361-
3362- if logger is None:
3363- self.logger = clogger.Logger()
3364-
3365- self.logger.info("hello, reposync")
3366-
3367-
3368- # ===================================================================
3369-
3370- def run(self, name=None, verbose=True):
3371- """
3372- Syncs the current repo configuration file with the filesystem.
3373- """
3374-
3375- self.logger.info("run, reposync, run!")
3376-
3377- try:
3378- self.tries = int(self.tries)
3379- except:
3380- utils.die(self.logger,"retry value must be an integer")
3381-
3382- self.verbose = verbose
3383-
3384- report_failure = False
3385- for repo in self.repos:
3386-
3387- env = repo.environment
3388-
3389- for k in env.keys():
3390- self.logger.info("environment: %s=%s" % (k,env[k]))
3391- if env[k] is not None:
3392- os.putenv(k,env[k])
3393-
3394- if name is not None and repo.name != name:
3395- # invoked to sync only a specific repo, this is not the one
3396- continue
3397- elif name is None and not repo.keep_updated:
3398- # invoked to run against all repos, but this one is off
3399- self.logger.info("%s is set to not be updated" % repo.name)
3400- continue
3401-
3402- repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
3403- repo_path = os.path.join(repo_mirror, repo.name)
3404- mirror = repo.mirror
3405-
3406- if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
3407- os.makedirs(repo_path)
3408-
3409- # which may actually NOT reposync if the repo is set to not mirror locally
3410- # but that's a technicality
3411-
3412- for x in range(self.tries+1,1,-1):
3413- success = False
3414- try:
3415- self.sync(repo)
3416- success = True
3417- except:
3418- utils.log_exc(self.logger)
3419- self.logger.warning("reposync failed, tries left: %s" % (x-2))
3420-
3421- if not success:
3422- report_failure = True
3423- if not self.nofail:
3424- utils.die(self.logger,"reposync failed, retry limit reached, aborting")
3425- else:
3426- self.logger.error("reposync failed, retry limit reached, skipping")
3427-
3428- self.update_permissions(repo_path)
3429-
3430- if report_failure:
3431- utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
3432-
3433- return True
3434-
3435- # ==================================================================================
3436-
3437- def sync(self, repo):
3438-
3439- """
3440- Conditionally sync a repo, based on type.
3441- """
3442-
3443- if repo.breed == "rhn":
3444- return self.rhn_sync(repo)
3445- elif repo.breed == "yum":
3446- return self.yum_sync(repo)
3447- elif repo.breed == "apt":
3448- return self.apt_sync(repo)
3449- elif repo.breed == "rsync":
3450- return self.rsync_sync(repo)
3451- else:
3452- utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
3453-
3454- # ====================================================================================
3455-
3456- def createrepo_walker(self, repo, dirname, fnames):
3457- """
3458- Used to run createrepo on a copied Yum mirror.
3459- """
3460- if os.path.exists(dirname) or repo['breed'] == 'rsync':
3461- utils.remove_yum_olddata(dirname)
3462-
3463- # add any repo metadata we can use
3464- mdoptions = []
3465- if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
3466- if not HAS_YUM:
3467- utils.die(self.logger,"yum is required to use this feature")
3468-
3469- rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
3470- if rmd.repoData.has_key("group"):
3471- groupmdfile = rmd.getData("group").location[1]
3472- mdoptions.append("-g %s" % groupmdfile)
3473- if rmd.repoData.has_key("prestodelta"):
3474- # need createrepo >= 0.9.7 to add deltas
3475- if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
3476- cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
3477- createrepo_ver = utils.subprocess_get(self.logger, cmd)
3478- if createrepo_ver >= "0.9.7":
3479- mdoptions.append("--deltas")
3480- else:
3481- self.logger.error("this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.")
3482-
3483- blended = utils.blender(self.api, False, repo)
3484- flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
3485- try:
3486- # BOOKMARK
3487- cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
3488- utils.subprocess_call(self.logger, cmd)
3489- except:
3490- utils.log_exc(self.logger)
3491- self.logger.error("createrepo failed.")
3492- del fnames[:] # we're in the right place
3493-
3494- # ====================================================================================
3495-
3496- def rsync_sync(self, repo):
3497-
3498- """
3499- Handle copying of rsync:// and rsync-over-ssh repos.
3500- """
3501-
3502- repo_mirror = repo.mirror
3503-
3504- if not repo.mirror_locally:
3505- utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
3506-
3507- if repo.rpm_list != "" and repo.rpm_list != []:
3508- self.logger.warning("--rpm-list is not supported for rsync'd repositories")
3509-
3510- # FIXME: don't hardcode
3511- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
3512-
3513- spacer = ""
3514- if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
3515- spacer = "-e ssh"
3516- if not repo.mirror.endswith("/"):
3517- repo.mirror = "%s/" % repo.mirror
3518-
3519- # FIXME: wrapper for subprocess that logs to logger
3520- cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
3521- rc = utils.subprocess_call(self.logger, cmd)
3522-
3523- if rc !=0:
3524- utils.die(self.logger,"cobbler reposync failed")
3525- os.path.walk(dest_path, self.createrepo_walker, repo)
3526- self.create_local_file(dest_path, repo)
3527-
3528- # ====================================================================================
3529-
3530- def rhn_sync(self, repo):
3531-
3532- """
3533- Handle mirroring of RHN repos.
3534- """
3535-
3536- repo_mirror = repo.mirror
3537-
3538- # FIXME? warn about not having yum-utils. We don't want to require it in the package because
3539- # RHEL4 and RHEL5U0 don't have it.
3540-
3541- if not os.path.exists("/usr/bin/reposync"):
3542- utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
3543-
3544- cmd = "" # command to run
3545- has_rpm_list = False # flag indicating not to pull the whole repo
3546-
3547- # detect cases that require special handling
3548-
3549- if repo.rpm_list != "" and repo.rpm_list != []:
3550- has_rpm_list = True
3551-
3552- # create yum config file for use by reposync
3553- # FIXME: don't hardcode
3554- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
3555- temp_path = os.path.join(dest_path, ".origin")
3556-
3557- if not os.path.isdir(temp_path):
3558- # FIXME: there's a chance this might break the RHN D/L case
3559- os.makedirs(temp_path)
3560-
3561- # how we invoke yum-utils depends on whether this is RHN content or not.
3562-
3563-
3564- # this is the somewhat more-complex RHN case.
3565- # NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
3566- if not repo.mirror_locally:
3567- utils.die("rhn:// repos do not work with --mirror-locally=1")
3568-
3569- if has_rpm_list:
3570- self.logger.warning("warning: --rpm-list is not supported for RHN content")
3571- rest = repo.mirror[6:] # everything after rhn://
3572- cmd = "/usr/bin/reposync %s -r %s --download_path=%s" % (self.rflags, rest, "/var/www/cobbler/repo_mirror")
3573- if repo.name != rest:
3574- args = { "name" : repo.name, "rest" : rest }
3575- utils.die(self.logger,"ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel" % args)
3576-
3577- if repo.arch == "i386":
3578- # counter-intuitive, but we want the newish kernels too
3579- repo.arch = "i686"
3580-
3581- if repo.arch != "":
3582- cmd = "%s -a %s" % (cmd, repo.arch)
3583-
3584- # now regardless of whether we're doing yumdownloader or reposync
3585- # or whether the repo was http://, ftp://, or rhn://, execute all queued
3586- # commands here. Any failure at any point stops the operation.
3587-
3588- if repo.mirror_locally:
3589- rc = utils.subprocess_call(self.logger, cmd)
3590- # Don't die if reposync fails, it is logged
3591- # if rc !=0:
3592- # utils.die(self.logger,"cobbler reposync failed")
3593-
3594- # some more special case handling for RHN.
3595- # create the config file now, because the directory didn't exist earlier
3596-
3597- temp_file = self.create_local_file(temp_path, repo, output=False)
3598-
3599- # now run createrepo to rebuild the index
3600-
3601- if repo.mirror_locally:
3602- os.path.walk(dest_path, self.createrepo_walker, repo)
3603-
3604- # create the config file the hosts will use to access the repository.
3605-
3606- self.create_local_file(dest_path, repo)
3607-
3608- # ====================================================================================
3609-
3610- def yum_sync(self, repo):
3611-
3612- """
3613- Handle copying of http:// and ftp:// yum repos.
3614- """
3615-
3616- repo_mirror = repo.mirror
3617-
3618- # warn about not having yum-utils. We don't want to require it in the package because
3619- # RHEL4 and RHEL5U0 don't have it.
3620-
3621- if not os.path.exists("/usr/bin/reposync"):
3622- utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
3623-
3624- cmd = "" # command to run
3625- has_rpm_list = False # flag indicating not to pull the whole repo
3626-
3627- # detect cases that require special handling
3628-
3629- if repo.rpm_list != "" and repo.rpm_list != []:
3630- has_rpm_list = True
3631-
3632- # create yum config file for use by reposync
3633- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
3634- temp_path = os.path.join(dest_path, ".origin")
3635-
3636- if not os.path.isdir(temp_path) and repo.mirror_locally:
3637- # FIXME: there's a chance this might break the RHN D/L case
3638- os.makedirs(temp_path)
3639-
3640- # create the config file that yum will use for the copying
3641-
3642- if repo.mirror_locally:
3643- temp_file = self.create_local_file(temp_path, repo, output=False)
3644-
3645- if not has_rpm_list and repo.mirror_locally:
3646- # if we have not requested only certain RPMs, use reposync
3647- cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, "/var/www/cobbler/repo_mirror")
3648- if repo.arch != "":
3649- if repo.arch == "x86":
3650- repo.arch = "i386" # FIX potential arch errors
3651- if repo.arch == "i386":
3652- # counter-intuitive, but we want the newish kernels too
3653- cmd = "%s -a i686" % (cmd)
3654- else:
3655- cmd = "%s -a %s" % (cmd, repo.arch)
3656-
3657- elif repo.mirror_locally:
3658-
3659- # create the output directory if it doesn't exist
3660- if not os.path.exists(dest_path):
3661- os.makedirs(dest_path)
3662-
3663- use_source = ""
3664- if repo.arch == "src":
3665- use_source = "--source"
3666-
3667- # older yumdownloader sometimes explodes on --resolvedeps
3668- # if this happens to you, upgrade yum & yum-utils
3669- extra_flags = self.settings.yumdownloader_flags
3670- cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))
3671-
3672- # now regardless of whether we're doing yumdownloader or reposync
3673- # or whether the repo was http://, ftp://, or rhn://, execute all queued
3674- # commands here. Any failure at any point stops the operation.
3675-
3676- if repo.mirror_locally:
3677- rc = utils.subprocess_call(self.logger, cmd)
3678- if rc !=0:
3679- utils.die(self.logger,"cobbler reposync failed")
3680-
3681- repodata_path = os.path.join(dest_path, "repodata")
3682-
3683- if not os.path.exists("/usr/bin/wget"):
3684- utils.die(self.logger,"no /usr/bin/wget found, please install wget")
3685-
3686- # grab repomd.xml and use it to download any metadata we can use
3687- cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
3688- rc = utils.subprocess_call(self.logger,cmd2)
3689- if rc == 0:
3690- # create our repodata directory now, as any extra metadata we're
3691- # about to download probably lives there
3692- if not os.path.isdir(repodata_path):
3693- os.makedirs(repodata_path)
3694- rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
3695- for mdtype in rmd.repoData.keys():
3696- # don't download metadata files that are created by default
3697- if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
3698- mdfile = rmd.getData(mdtype).location[1]
3699- cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
3700- utils.subprocess_call(self.logger,cmd3)
3701- if rc !=0:
3702- utils.die(self.logger,"wget failed")
3703-
3704- # now run createrepo to rebuild the index
3705-
3706- if repo.mirror_locally:
3707- os.path.walk(dest_path, self.createrepo_walker, repo)
3708-
3709- # create the config file the hosts will use to access the repository.
3710-
3711- self.create_local_file(dest_path, repo)
3712-
3713- # ====================================================================================
3714-
3715-
3716- def apt_sync(self, repo):
3717-
3718- """
3719- Handle copying of http:// and ftp:// debian repos.
3720- """
3721-
3722- repo_mirror = repo.mirror
3723-
3724- # warn about not having mirror program.
3725-
3726- mirror_program = "/usr/bin/debmirror"
3727- if not os.path.exists(mirror_program):
3728- utils.die(self.logger,"no %s found, please install it"%(mirror_program))
3729-
3730- cmd = "" # command to run
3731- has_rpm_list = False # flag indicating not to pull the whole repo
3732-
3733- # detect cases that require special handling
3734-
3735- if repo.rpm_list != "" and repo.rpm_list != []:
3736- utils.die(self.logger,"has_rpm_list not yet supported on apt repos")
3737-
3738- if not repo.arch:
3739- utils.die(self.logger,"Architecture is required for apt repositories")
3740-
3741-        # build destination path for the repo
3742- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
3743-
3744- if repo.mirror_locally:
3745- mirror = repo.mirror.replace("@@suite@@",repo.os_version)
3746-
3747- idx = mirror.find("://")
3748- method = mirror[:idx]
3749- mirror = mirror[idx+3:]
3750-
3751- idx = mirror.find("/")
3752- host = mirror[:idx]
3753- mirror = mirror[idx+1:]
3754-
3755- idx = mirror.rfind("/dists/")
3756- suite = mirror[idx+7:]
3757- mirror = mirror[:idx]
3758-
3759- mirror_data = "--method=%s --host=%s --root=%s --dist=%s " % ( method , host , mirror , suite )
3760-
3761- # FIXME : flags should come from repo instead of being hardcoded
3762-
3763- rflags = "--passive --nocleanup"
3764- for x in repo.yumopts:
3765- if repo.yumopts[x]:
3766- rflags += " %s %s" % ( x , repo.yumopts[x] )
3767- else:
3768- rflags += " %s" % x
3769- cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data, dest_path)
3770- if repo.arch == "src":
3771- cmd = "%s --source" % cmd
3772- else:
3773- arch = repo.arch
3774- if arch == "x86":
3775- arch = "i386" # FIX potential arch errors
3776- if arch == "x86_64":
3777- arch = "amd64" # FIX potential arch errors
3778- cmd = "%s --nosource -a %s" % (cmd, arch)
3779-
3780- rc = utils.subprocess_call(self.logger, cmd)
3781- if rc !=0:
3782- utils.die(self.logger,"cobbler reposync failed")
3783-
3784-
3785- def create_local_file(self, dest_path, repo, output=True):
3786- """
3787-
3788- Creates Yum config files for use by reposync
3789-
3790- Two uses:
3791- (A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
3792-        (B) output=False, Create a temporary config file to feed into yum/reposync for mirroring
3793- """
3794-
3795- # the output case will generate repo configuration files which are usable
3796- # for the installed systems. They need to be made compatible with --server-override
3797- # which means they are actually templates, which need to be rendered by a cobbler-sync
3798- # on per profile/system basis.
3799-
3800- if output:
3801- fname = os.path.join(dest_path,"config.repo")
3802- else:
3803- fname = os.path.join(dest_path, "%s.repo" % repo.name)
3804- self.logger.debug("creating: %s" % fname)
3805- if not os.path.exists(dest_path):
3806- utils.mkdir(dest_path)
3807- config_file = open(fname, "w+")
3808- config_file.write("[%s]\n" % repo.name)
3809- config_file.write("name=%s\n" % repo.name)
3810- optenabled = False
3811- optgpgcheck = False
3812- if output:
3813- if repo.mirror_locally:
3814- line = "baseurl=http://${server}/cobbler/repo_mirror/%s\n" % (repo.name)
3815- else:
3816- mstr = repo.mirror
3817- if mstr.startswith("/"):
3818- mstr = "file://%s" % mstr
3819- line = "baseurl=%s\n" % mstr
3820-
3821- config_file.write(line)
3822- # user may have options specific to certain yum plugins
3823- # add them to the file
3824- for x in repo.yumopts:
3825- config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
3826- if x == "enabled":
3827- optenabled = True
3828- if x == "gpgcheck":
3829- optgpgcheck = True
3830- else:
3831- mstr = repo.mirror
3832- if mstr.startswith("/"):
3833- mstr = "file://%s" % mstr
3834- line = "baseurl=%s\n" % mstr
3835- if self.settings.http_port not in (80, '80'):
3836- http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
3837- else:
3838- http_server = self.settings.server
3839- line = line.replace("@@server@@",http_server)
3840- config_file.write(line)
3841- if not optenabled:
3842- config_file.write("enabled=1\n")
3843- config_file.write("priority=%s\n" % repo.priority)
3844- # FIXME: potentially might want a way to turn this on/off on a per-repo basis
3845- if not optgpgcheck:
3846- config_file.write("gpgcheck=0\n")
3847- config_file.close()
3848- return fname
3849-
3850- # ==================================================================================
3851-
3852- def update_permissions(self, repo_path):
3853- """
3854- Verifies that permissions and contexts after an rsync are as expected.
3855- Sending proper rsync flags should prevent the need for this, though this is largely
3856- a safeguard.
3857- """
3858- # all_path = os.path.join(repo_path, "*")
3859- cmd1 = "chown -R root:www-data %s" % repo_path
3860- utils.subprocess_call(self.logger, cmd1)
3861-
3862- cmd2 = "chmod -R 755 %s" % repo_path
3863- utils.subprocess_call(self.logger, cmd2)
3864-
3865
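
apt_sync() above drives /usr/bin/debmirror by tearing the repo's mirror URL apart into --method/--host/--root/--dist pieces before adding the architecture and source flags. A short standalone sketch of that decomposition, using a hypothetical mirror URL (the helper name is illustrative, not from the code above):

    def split_apt_mirror(mirror):
        # e.g. mirror = "http://ftp.us.debian.org/debian/dists/squeeze"
        method, rest = mirror.split("://", 1)     # "http", "ftp.us.debian.org/debian/dists/squeeze"
        host, path = rest.split("/", 1)           # "ftp.us.debian.org", "debian/dists/squeeze"
        root, suite = path.rsplit("/dists/", 1)   # "debian", "squeeze"
        return "--method=%s --host=%s --root=%s --dist=%s" % (method, host, root, suite)

    # split_apt_mirror("http://ftp.us.debian.org/debian/dists/squeeze")
    # -> "--method=http --host=ftp.us.debian.org --root=debian --dist=squeeze"
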
3866=== removed file '.pc/42_fix_repomirror_create_sync.patch/cobbler/codes.py'
3867--- .pc/42_fix_repomirror_create_sync.patch/cobbler/codes.py 2011-05-02 18:26:03 +0000
3868+++ .pc/42_fix_repomirror_create_sync.patch/cobbler/codes.py 1970-01-01 00:00:00 +0000
3869@@ -1,98 +0,0 @@
3870-
3871-"""
3872-various codes and constants used by Cobbler
3873-
3874-Copyright 2006-2009, Red Hat, Inc
3875-Michael DeHaan <mdehaan@redhat.com>
3876-
3877-This program is free software; you can redistribute it and/or modify
3878-it under the terms of the GNU General Public License as published by
3879-the Free Software Foundation; either version 2 of the License, or
3880-(at your option) any later version.
3881-
3882-This program is distributed in the hope that it will be useful,
3883-but WITHOUT ANY WARRANTY; without even the implied warranty of
3884-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3885-GNU General Public License for more details.
3886-
3887-You should have received a copy of the GNU General Public License
3888-along with this program; if not, write to the Free Software
3889-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
3890-02110-1301 USA
3891-"""
3892-
3893-import utils
3894-
3895-# OS variants table. This is a variance of the data from
3896-# ls /usr/lib/python2.X/site-packages/virtinst/FullVirtGuest.py
3897-# but replicated here as we can't assume cobbler is installed on a system with libvirt.
3898-# in many cases it will not be (i.e. old EL4 server, etc) and we need this info to
3899-# know how to validate --os-variant and --os-version.
3900-#
3901-# The keys of this hash correspond with the --breed flag in Cobbler.
3902-# --breed has physical provisioning semantics as well as virt semantics.
3903-#
3904-# presence of something in this table does /not/ mean it's supported.
3905-# for instance, currently, "redhat", "debian", and "suse" do something interesting.
3906-# the rest are undefined (for now), this will evolve.
3907-
3908-VALID_OS_BREEDS = [
3909- "redhat", "debian", "ubuntu", "suse", "generic", "windows", "unix", "vmware", "other"
3910-]
3911-
3912-VALID_OS_VERSIONS = {
3913- "redhat" : [ "rhel2.1", "rhel3", "rhel4", "rhel5", "rhel6", "fedora5", "fedora6", "fedora7", "fedora8", "fedora9", "fedora10", "fedora11", "fedora12", "fedora13", "fedora14", "generic24", "generic26", "virtio26", "other" ],
3914- "suse" : [ "sles10", "generic24", "generic26", "virtio26", "other" ],
3915- "debian" : [ "etch", "lenny", "squeeze", "sid", "stable", "testing", "unstable", "generic24", "generic26", "other" ],
3916- "ubuntu" : [ "dapper", "hardy", "intrepid", "jaunty", "karmic", "lucid", "maverick", "natty" ],
3917- "generic" : [ "generic24", "generic26", "other" ],
3918- "windows" : [ "winxp", "win2k", "win2k3", "vista", "other" ],
3919- "unix" : [ "solaris9", "solaris10", "freebsd6", "openbsd4", "other" ],
3920- "vmware" : [ "esx4", "esxi4" ],
3921- "other" : [ "msdos", "netware4", "netware5", "netware6", "generic", "other" ]
3922-}
3923-
3924-VALID_REPO_BREEDS = [
3925-# "rsync", "rhn", "yum", "apt"
3926- "rsync", "rhn", "yum"
3927-]
3928-
3929-def uniquify(seq, idfun=None):
3930-
3931- # this is odd (older mod_python scoping bug?) but we can't use
3932- # utils.uniquify here because on older distros (RHEL4/5)
3933- # mod_python gets another utils. As a result,
3934- # it is duplicated here for now. Bad, but ... now you know.
3935- #
3936- # credit: http://www.peterbe.com/plog/uniqifiers-benchmark
3937- # FIXME: if this is actually slower than some other way, overhaul it
3938-
3939- if idfun is None:
3940- def idfun(x):
3941- return x
3942- seen = {}
3943- result = []
3944- for item in seq:
3945- marker = idfun(item)
3946- if marker in seen:
3947- continue
3948- seen[marker] = 1
3949- result.append(item)
3950- return result
3951-
3952-
3953-def get_all_os_versions():
3954- """
3955- Collapse the above list of OS versions for usage/display by the CLI/webapp.
3956- """
3957- results = ['']
3958- for x in VALID_OS_VERSIONS.keys():
3959- for y in VALID_OS_VERSIONS[x]:
3960- results.append(y)
3961-
3962- results = uniquify(results)
3963-
3964- results.sort()
3965- return results
3966-
3967-
3968
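
codes.py above is the static table the CLI and web UI validate --breed/--os-version against; get_all_os_versions() just flattens that table into one de-duplicated, sorted list. A tiny standalone sketch of the same collapse over a trimmed-down (hypothetical) subset of the table:

    VALID_OS_VERSIONS = {
        "debian": ["etch", "lenny", "generic26", "other"],
        "ubuntu": ["lucid", "maverick", "generic26", "other"],
    }

    def get_all_os_versions():
        results = ['']
        for breed in VALID_OS_VERSIONS:
            results.extend(VALID_OS_VERSIONS[breed])
        return sorted(set(results))   # same net effect as uniquify() followed by sort()

    # get_all_os_versions()
    # -> ['', 'etch', 'generic26', 'lenny', 'lucid', 'maverick', 'other']
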
3969=== removed directory '.pc/42_fix_repomirror_create_sync.patch/cobbler/modules'
3970=== removed file '.pc/42_fix_repomirror_create_sync.patch/cobbler/modules/manage_import_debian_ubuntu.py'
3971--- .pc/42_fix_repomirror_create_sync.patch/cobbler/modules/manage_import_debian_ubuntu.py 2011-05-02 18:26:03 +0000
3972+++ .pc/42_fix_repomirror_create_sync.patch/cobbler/modules/manage_import_debian_ubuntu.py 1970-01-01 00:00:00 +0000
3973@@ -1,779 +0,0 @@
3974-"""
3975-This is some of the code behind 'cobbler sync'.
3976-
3977-Copyright 2006-2009, Red Hat, Inc
3978-Michael DeHaan <mdehaan@redhat.com>
3979-John Eckersberg <jeckersb@redhat.com>
3980-
3981-This program is free software; you can redistribute it and/or modify
3982-it under the terms of the GNU General Public License as published by
3983-the Free Software Foundation; either version 2 of the License, or
3984-(at your option) any later version.
3985-
3986-This program is distributed in the hope that it will be useful,
3987-but WITHOUT ANY WARRANTY; without even the implied warranty of
3988-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3989-GNU General Public License for more details.
3990-
3991-You should have received a copy of the GNU General Public License
3992-along with this program; if not, write to the Free Software
3993-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
3994-02110-1301 USA
3995-"""
3996-
3997-import os
3998-import os.path
3999-import shutil
4000-import time
4001-import sys
4002-import glob
4003-import traceback
4004-import errno
4005-import re
4006-from utils import popen2
4007-from shlex import shlex
4008-
4009-
4010-import utils
4011-from cexceptions import *
4012-import templar
4013-
4014-import item_distro
4015-import item_profile
4016-import item_repo
4017-import item_system
4018-
4019-from utils import _
4020-
4021-def register():
4022- """
4023- The mandatory cobbler module registration hook.
4024- """
4025- return "manage/import"
4026-
4027-
4028-class ImportDebianUbuntuManager:
4029-
4030- def __init__(self,config,logger):
4031- """
4032- Constructor
4033- """
4034- self.logger = logger
4035- self.config = config
4036- self.api = config.api
4037- self.distros = config.distros()
4038- self.profiles = config.profiles()
4039- self.systems = config.systems()
4040- self.settings = config.settings()
4041- self.repos = config.repos()
4042- self.templar = templar.Templar(config)
4043-
4044- # required function for import modules
4045- def what(self):
4046- return "import/debian_ubuntu"
4047-
4048- # required function for import modules
4049- def check_for_signature(self,path,cli_breed):
4050- signatures = [
4051- 'pool',
4052- ]
4053-
4054- #self.logger.info("scanning %s for a debian/ubuntu distro signature" % path)
4055- for signature in signatures:
4056- d = os.path.join(path,signature)
4057- if os.path.exists(d):
4058- self.logger.info("Found a debian/ubuntu compatible signature: %s" % signature)
4059- return (True,signature)
4060-
4061- if cli_breed and cli_breed in self.get_valid_breeds():
4062- self.logger.info("Warning: No distro signature for kernel at %s, using value from command line" % path)
4063- return (True,None)
4064-
4065- return (False,None)
4066-
4067- # required function for import modules
4068- def run(self,pkgdir,mirror,mirror_name,network_root=None,kickstart_file=None,rsync_flags=None,arch=None,breed=None,os_version=None):
4069- self.pkgdir = pkgdir
4070- self.mirror = mirror
4071- self.mirror_name = mirror_name
4072- self.network_root = network_root
4073- self.kickstart_file = kickstart_file
4074- self.rsync_flags = rsync_flags
4075- self.arch = arch
4076- self.breed = breed
4077- self.os_version = os_version
4078-
4079- # some fixups for the XMLRPC interface, which does not use "None"
4080- if self.arch == "": self.arch = None
4081- if self.mirror == "": self.mirror = None
4082- if self.mirror_name == "": self.mirror_name = None
4083- if self.kickstart_file == "": self.kickstart_file = None
4084- if self.os_version == "": self.os_version = None
4085- if self.rsync_flags == "": self.rsync_flags = None
4086- if self.network_root == "": self.network_root = None
4087-
4088- # If no breed was specified on the command line, figure it out
4089- if self.breed == None:
4090- self.breed = self.get_breed_from_directory()
4091- if not self.breed:
4092- utils.die(self.logger,"import failed - could not determine breed of debian-based distro")
4093-
4094- # debug log stuff for testing
4095- #self.logger.info("DEBUG: self.pkgdir = %s" % str(self.pkgdir))
4096- #self.logger.info("DEBUG: self.mirror = %s" % str(self.mirror))
4097- #self.logger.info("DEBUG: self.mirror_name = %s" % str(self.mirror_name))
4098- #self.logger.info("DEBUG: self.network_root = %s" % str(self.network_root))
4099- #self.logger.info("DEBUG: self.kickstart_file = %s" % str(self.kickstart_file))
4100- #self.logger.info("DEBUG: self.rsync_flags = %s" % str(self.rsync_flags))
4101- #self.logger.info("DEBUG: self.arch = %s" % str(self.arch))
4102- #self.logger.info("DEBUG: self.breed = %s" % str(self.breed))
4103- #self.logger.info("DEBUG: self.os_version = %s" % str(self.os_version))
4104-
4105- # both --import and --name are required arguments
4106-
4107- if self.mirror is None:
4108- utils.die(self.logger,"import failed. no --path specified")
4109- if self.mirror_name is None:
4110- utils.die(self.logger,"import failed. no --name specified")
4111-
4112- # if --arch is supplied, validate it to ensure it's valid
4113-
4114- if self.arch is not None and self.arch != "":
4115- self.arch = self.arch.lower()
4116- if self.arch == "x86":
4117- # be consistent
4118- self.arch = "i386"
4119- if self.arch not in self.get_valid_arches():
4120- utils.die(self.logger,"arch must be one of: %s" % string.join(self.get_valid_arches(),", "))
4121-
4122- # if we're going to do any copying, set where to put things
4123- # and then make sure nothing is already there.
4124-
4125- self.path = os.path.normpath( "%s/ks_mirror/%s" % (self.settings.webdir, self.mirror_name) )
4126- if os.path.exists(self.path) and self.arch is None:
4127- # FIXME : Raise exception even when network_root is given ?
4128- utils.die(self.logger,"Something already exists at this import location (%s). You must specify --arch to avoid potentially overwriting existing files." % self.path)
4129-
4130- # import takes a --kickstart for forcing selection that can't be used in all circumstances
4131-
4132- if self.kickstart_file and not self.breed:
4133- utils.die(self.logger,"Kickstart file can only be specified when a specific breed is selected")
4134-
4135- if self.os_version and not self.breed:
4136- utils.die(self.logger,"OS version can only be specified when a specific breed is selected")
4137-
4138- if self.breed and self.breed.lower() not in self.get_valid_breeds():
4139- utils.die(self.logger,"Supplied import breed is not supported by this module")
4140-
4141- # if --arch is supplied, make sure the user is not importing a path with a different
4142- # arch, which would just be silly.
4143-
4144- if self.arch:
4145- # append the arch path to the name if the arch is not already
4146- # found in the name.
4147- for x in self.get_valid_arches():
4148- if self.path.lower().find(x) != -1:
4149- if self.arch != x :
4150- utils.die(self.logger,"Architecture found on pathname (%s) does not fit the one given in command line (%s)"%(x,self.arch))
4151- break
4152- else:
4153- # FIXME : This is very likely removed later at get_proposed_name, and the guessed arch appended again
4154- self.path += ("-%s" % self.arch)
4155- # If arch is specified we also need to update the mirror name.
4156- self.mirror_name = self.mirror_name + "-" + self.arch
4157-
4158- # make the output path and mirror content but only if not specifying that a network
4159- # accessible support location already exists (this is --available-as on the command line)
4160-
4161- if self.network_root is None:
4162- # we need to mirror (copy) the files
4163-
4164- utils.mkdir(self.path)
4165-
4166- if self.mirror.startswith("http://") or self.mirror.startswith("ftp://") or self.mirror.startswith("nfs://"):
4167-
4168-                # http mirrors are kind of primitive. rsync is better.
4169- # that's why this isn't documented in the manpage and we don't support them.
4170- # TODO: how about adding recursive FTP as an option?
4171-
4172- utils.die(self.logger,"unsupported protocol")
4173-
4174- else:
4175-
4176- # good, we're going to use rsync..
4177- # we don't use SSH for public mirrors and local files.
4178- # presence of user@host syntax means use SSH
4179-
4180- # kick off the rsync now
4181-
4182- if not utils.rsync_files(self.mirror, self.path, self.rsync_flags, self.logger):
4183- utils.die(self.logger, "failed to rsync the files")
4184-
4185- else:
4186-
4187- # rather than mirroring, we're going to assume the path is available
4188- # over http, ftp, and nfs, perhaps on an external filer. scanning still requires
4189-            # that --mirror be a filesystem path, while --available-as marks the network path
4190-
4191- if not os.path.exists(self.mirror):
4192- utils.die(self.logger, "path does not exist: %s" % self.mirror)
4193-
4194- # find the filesystem part of the path, after the server bits, as each distro
4195- # URL needs to be calculated relative to this.
4196-
4197- if not self.network_root.endswith("/"):
4198- self.network_root = self.network_root + "/"
4199- self.path = os.path.normpath( self.mirror )
4200- valid_roots = [ "nfs://", "ftp://", "http://" ]
4201- for valid_root in valid_roots:
4202- if self.network_root.startswith(valid_root):
4203- break
4204- else:
4205- utils.die(self.logger, "Network root given to --available-as must be nfs://, ftp://, or http://")
4206- if self.network_root.startswith("nfs://"):
4207- try:
4208- (a,b,rest) = self.network_root.split(":",3)
4209- except:
4210- utils.die(self.logger, "Network root given to --available-as is missing a colon, please see the manpage example.")
4211-
4212- # now walk the filesystem looking for distributions that match certain patterns
4213-
4214- self.logger.info("adding distros")
4215- distros_added = []
4216- # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
4217- os.path.walk(self.path, self.distro_adder, distros_added)
4218-
4219- # find out if we can auto-create any repository records from the install tree
4220-
4221- if self.network_root is None:
4222- self.logger.info("associating repos")
4223- # FIXME: this automagic is not possible (yet) without mirroring
4224- self.repo_finder(distros_added)
4225-
4226- # find the most appropriate answer files for each profile object
4227-
4228- self.logger.info("associating kickstarts")
4229- self.kickstart_finder(distros_added)
4230-
4231- # ensure bootloaders are present
4232- self.api.pxegen.copy_bootloaders()
4233-
4234- return True
4235-
4236- # required function for import modules
4237- def get_valid_arches(self):
4238- return ["i386", "ppc", "x86_64", "x86",]
4239-
4240- # required function for import modules
4241- def get_valid_breeds(self):
4242- return ["debian","ubuntu"]
4243-
4244- # required function for import modules
4245- def get_valid_os_versions(self):
4246- if self.breed == "debian":
4247- return ["etch", "lenny", "squeeze", "sid", "stable", "testing", "unstable", "experimental",]
4248- elif self.breed == "ubuntu":
4249- return ["dapper", "hardy", "karmic", "lucid", "maverick", "natty",]
4250- else:
4251- return []
4252-
4253- def get_valid_repo_breeds(self):
4254- return ["apt",]
4255-
4256- def get_release_files(self):
4257- """
4258- Find distro release packages.
4259- """
4260- return glob.glob(os.path.join(self.get_rootdir(), "dists/*"))
4261-
4262- def get_breed_from_directory(self):
4263- for breed in self.get_valid_breeds():
4264- # NOTE : Although we break the loop after the first match,
4265- # multiple debian-derived distros can actually live in the same pool -- JP
4266- d = os.path.join(self.mirror, breed)
4267- if (os.path.islink(d) and os.path.isdir(d) and os.path.realpath(d) == os.path.realpath(self.mirror)) or os.path.basename(self.mirror) == breed:
4268- return breed
4269- else:
4270- return None
4271-
4272- def get_tree_location(self, distro):
4273- """
4274- Once a distribution is identified, find the part of the distribution
4275- that has the URL in it that we want to use for kickstarting the
4276- distribution, and create a ksmeta variable $tree that contains this.
4277- """
4278-
4279- base = self.get_rootdir()
4280-
4281- if self.network_root is None:
4282- dists_path = os.path.join(self.path, "dists")
4283- if os.path.isdir(dists_path):
4284- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
4285- else:
4286- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
4287- self.set_install_tree(distro, tree)
4288- else:
4289- # where we assign the kickstart source is relative to our current directory
4290- # and the starting directory of the crawl. We find the path segments in
4291- # between and tack them onto the network source path to build the explicit
4292- # network path to the distro that Anaconda can digest.
4293- tail = self.path_tail(self.path, base)
4294- tree = self.network_root[:-1] + tail
4295- self.set_install_tree(distro, tree)
4296-
4297- return
4298-
4299- def repo_finder(self, distros_added):
4300- for distro in distros_added:
4301- self.logger.info("traversing distro %s" % distro.name)
4302- # FIXME : Shouldn't the value of self.network_root decide this?
4303- if distro.kernel.find("ks_mirror") != -1:
4304- basepath = os.path.dirname(distro.kernel)
4305- top = self.get_rootdir()
4306- self.logger.info("descent into %s" % top)
4307- dists_path = os.path.join(self.path, "dists")
4308- if not os.path.isdir(dists_path):
4309- self.process_repos()
4310- else:
4311- self.logger.info("this distro isn't mirrored")
4312-
4313- def process_repos(self):
4314- pass
4315-
4316- def distro_adder(self,distros_added,dirname,fnames):
4317- """
4318- This is an os.path.walk routine that finds distributions in the directory
4319- to be scanned and then creates them.
4320- """
4321-
4322- # FIXME: If there is more than one kernel or initrd image in the same directory,
4323- # the results are unpredictable
4324-
4325- initrd = None
4326- kernel = None
4327-
4328- for x in fnames:
4329- adtls = []
4330-
4331- fullname = os.path.join(dirname,x)
4332- if os.path.islink(fullname) and os.path.isdir(fullname):
4333- if fullname.startswith(self.path):
4334- self.logger.warning("avoiding symlink loop")
4335- continue
4336- self.logger.info("following symlink: %s" % fullname)
4337- os.path.walk(fullname, self.distro_adder, distros_added)
4338-
4339- if ( x.startswith("initrd.gz") ) and x != "initrd.size":
4340- initrd = os.path.join(dirname,x)
4341- if ( x.startswith("linux") ) and x.find("initrd") == -1:
4342- kernel = os.path.join(dirname,x)
4343-
4344- # if we've collected a matching kernel and initrd pair, turn them into an entry and add it to the list
4345- if initrd is not None and kernel is not None:
4346- adtls.append(self.add_entry(dirname,kernel,initrd))
4347- kernel = None
4348- initrd = None
4349-
4350- for adtl in adtls:
4351- distros_added.extend(adtl)
4352-
4353- def add_entry(self,dirname,kernel,initrd):
4354- """
4355- When we find a directory with a valid kernel/initrd in it, create the distribution objects
4356- as appropriate and save them. This includes creating xen and rescue distros/profiles
4357- if possible.
4358- """
4359-
4360- proposed_name = self.get_proposed_name(dirname,kernel)
4361- proposed_arch = self.get_proposed_arch(dirname)
4362-
4363- if self.arch and proposed_arch and self.arch != proposed_arch:
4364- utils.die(self.logger,"Arch from pathname (%s) does not match with supplied one %s"%(proposed_arch,self.arch))
4365-
4366- archs = self.learn_arch_from_tree()
4367- if not archs:
4368- if self.arch:
4369- archs.append( self.arch )
4370- else:
4371- if self.arch and self.arch not in archs:
4372- utils.die(self.logger, "Given arch (%s) not found on imported tree %s"%(self.arch,self.get_pkgdir()))
4373- if proposed_arch:
4374- if archs and proposed_arch not in archs:
4375- self.logger.warning("arch from pathname (%s) not found on imported tree %s" % (proposed_arch,self.get_pkgdir()))
4376- return
4377-
4378- archs = [ proposed_arch ]
4379-
4380- if len(archs)>1:
4381- self.logger.warning("- Warning : Multiple archs found : %s" % (archs))
4382-
4383- distros_added = []
4384-
4385- for pxe_arch in archs:
4386- name = proposed_name + "-" + pxe_arch
4387- existing_distro = self.distros.find(name=name)
4388-
4389- if existing_distro is not None:
4390- self.logger.warning("skipping import, as distro name already exists: %s" % name)
4391- continue
4392-
4393- else:
4394- self.logger.info("creating new distro: %s" % name)
4395- distro = self.config.new_distro()
4396-
4397- if name.find("-autoboot") != -1:
4398- # this is an artifact of some EL-3 imports
4399- continue
4400-
4401- distro.set_name(name)
4402- distro.set_kernel(kernel)
4403- distro.set_initrd(initrd)
4404- distro.set_arch(pxe_arch)
4405- distro.set_breed(self.breed)
4406- # If a version was supplied on command line, we set it now
4407- if self.os_version:
4408- distro.set_os_version(self.os_version)
4409-
4410- self.distros.add(distro,save=True)
4411- distros_added.append(distro)
4412-
4413- existing_profile = self.profiles.find(name=name)
4414-
4415- # see if the profile name is already used, if so, skip it and
4416- # do not modify the existing profile
4417-
4418- if existing_profile is None:
4419- self.logger.info("creating new profile: %s" % name)
4420- #FIXME: The created profile holds a default kickstart, and should be breed specific
4421- profile = self.config.new_profile()
4422- else:
4423- self.logger.info("skipping existing profile, name already exists: %s" % name)
4424- continue
4425-
4426- # save our minimal profile which just points to the distribution and a good
4427- # default answer file
4428-
4429- profile.set_name(name)
4430- profile.set_distro(name)
4431- profile.set_kickstart(self.kickstart_file)
4432-
4433- # depending on the name of the profile we can define a good virt-type
4434- # for usage with koan
4435-
4436- if name.find("-xen") != -1:
4437- profile.set_virt_type("xenpv")
4438- elif name.find("vmware") != -1:
4439- profile.set_virt_type("vmware")
4440- else:
4441- profile.set_virt_type("qemu")
4442-
4443- # save our new profile to the collection
4444-
4445- self.profiles.add(profile,save=True)
4446-
4447- return distros_added
4448-
4449- def get_proposed_name(self,dirname,kernel=None):
4450- """
4451- Given a directory name where we have a kernel/initrd pair, try to autoname
4452- the distribution (and profile) object based on the contents of that path
4453- """
4454-
4455- if self.network_root is not None:
4456- name = self.mirror_name + "-".join(self.path_tail(os.path.dirname(self.path),dirname).split("/"))
4457- else:
4458- # remove the part that says /var/www/cobbler/ks_mirror/name
4459- name = "-".join(dirname.split("/")[5:])
4460-
4461- if kernel is not None and kernel.find("PAE") != -1:
4462- name = name + "-PAE"
4463-
4464- # These are all Ubuntu's doing; the netboot images are buried pretty
4465- # deep. ;-) -JC
4466- name = name.replace("-netboot","")
4467- name = name.replace("-ubuntu-installer","")
4468- name = name.replace("-amd64","")
4469- name = name.replace("-i386","")
4470-
4471- # we know that some kernel paths should not be in the name
4472-
4473- name = name.replace("-images","")
4474- name = name.replace("-pxeboot","")
4475- name = name.replace("-install","")
4476- name = name.replace("-isolinux","")
4477-
4478- # some paths above the media root may have extra path segments we want
4479- # to clean up
4480-
4481- name = name.replace("-os","")
4482- name = name.replace("-tree","")
4483- name = name.replace("var-www-cobbler-", "")
4484- name = name.replace("ks_mirror-","")
4485- name = name.replace("--","-")
4486-
4487- # remove any architecture-name string, as the real arch will be appended later
4488-
4489- name = name.replace("chrp","ppc64")
4490-
4491- for separator in [ '-' , '_' , '.' ] :
4492- for arch in [ "i386" , "x86_64" , "ia64" , "ppc64", "ppc32", "ppc", "x86" , "s390x", "s390" , "386" , "amd" ]:
4493- name = name.replace("%s%s" % ( separator , arch ),"")
4494-
4495- return name
4496-
4497- def get_proposed_arch(self,dirname):
4498- """
4499- Given a directory name, can we infer an architecture from a path segment?
4500- """
4501- if dirname.find("x86_64") != -1 or dirname.find("amd") != -1:
4502- return "x86_64"
4503- if dirname.find("ia64") != -1:
4504- return "ia64"
4505- if dirname.find("i386") != -1 or dirname.find("386") != -1 or dirname.find("x86") != -1:
4506- return "i386"
4507- if dirname.find("s390x") != -1:
4508- return "s390x"
4509- if dirname.find("s390") != -1:
4510- return "s390"
4511- if dirname.find("ppc64") != -1 or dirname.find("chrp") != -1:
4512- return "ppc64"
4513- if dirname.find("ppc32") != -1:
4514- return "ppc"
4515- if dirname.find("ppc") != -1:
4516- return "ppc"
4517- return None
4518-
4519- def arch_walker(self,foo,dirname,fnames):
4520- """
4521- See docs on learn_arch_from_tree.
4522-
4523- The TRY_LIST is used to speed up the search and should be dropped for the default importer.
4524- Searched kernel names are kernel-header, linux-headers-, kernel-largesmp, and kernel-hugemem.
4525-
4526- This method is useful for getting the archs, but also the package type and a rough guess of the breed.
4527- """
4528-
4529- # try to find a kernel header package and then look at its arch.
4530- for x in fnames:
4531- if self.match_kernelarch_file(x):
4532- for arch in self.get_valid_arches():
4533- if x.find(arch) != -1:
4534- foo[arch] = 1
4535- for arch in [ "i686" , "amd64" ]:
4536- if x.find(arch) != -1:
4537- foo[arch] = 1
4538-
4539- def kickstart_finder(self,distros_added):
4540- """
4541- For all of the profiles in the config without a kickstart, use the
4542- given kickstart file, or look at the kernel path and try to guess
4543- the distro from it; if we can, assign a kickstart if one is
4544- available for it.
4545- """
4546- for profile in self.profiles:
4547- distro = self.distros.find(name=profile.get_conceptual_parent().name)
4548- if distro is None or not (distro in distros_added):
4549- continue
4550-
4551- kdir = os.path.dirname(distro.kernel)
4552- if self.kickstart_file == None:
4553- for file in self.get_release_files():
4554- results = self.scan_pkg_filename(file)
4555- # FIXME : If the OS is not found in the tree but is set via the CLI, no kickstart is searched for
4556- if results is None:
4557- self.logger.warning("skipping %s" % file)
4558- continue
4559- (flavor, major, minor, release) = results
4560- # Why use set_variance()? scan_pkg_filename() does everything we need now - jcammarata
4561- #version , ks = self.set_variance(flavor, major, minor, distro.arch)
4562- if self.os_version:
4563- if self.os_version != flavor:
4564- utils.die(self.logger,"CLI version differs from tree : %s vs. %s" % (self.os_version,flavor))
4565- distro.set_comment("%s %s (%s.%s.%s) %s" % (self.breed,flavor,major,minor,release,self.arch))
4566- distro.set_os_version(flavor)
4567- # is this even valid for debian/ubuntu? - jcammarata
4568- #ds = self.get_datestamp()
4569- #if ds is not None:
4570- # distro.set_tree_build_time(ds)
4571- profile.set_kickstart("/var/lib/cobbler/kickstarts/sample.seed")
4572- self.profiles.add(profile,save=True)
4573-
4574- self.configure_tree_location(distro)
4575- self.distros.add(distro,save=True) # re-save
4576- self.api.serialize()
4577-
4578- def configure_tree_location(self, distro):
4579- """
4580- Once a distribution is identified, find the part of the distribution
4581- that has the URL in it that we want to use for kickstarting the
4582- distribution, and create a ksmeta variable $tree that contains this.
4583- """
4584-
4585- base = self.get_rootdir()
4586-
4587- if self.network_root is None:
4588- dists_path = os.path.join( self.path , "dists" )
4589- if os.path.isdir( dists_path ):
4590- tree = "http://@@http_server@@/cblr/ks_mirror/%s" % (self.mirror_name)
4591- else:
4592- tree = "http://@@http_server@@/cblr/repo_mirror/%s" % (distro.name)
4593- self.set_install_tree(distro, tree)
4594- else:
4595- # where we assign the kickstart source is relative to our current directory
4596- # and the starting directory of the crawl. We find the path segments in
4597- # between and tack them onto the network source path to build the explicit
4598- # network path to the distro that Anaconda can digest.
4599- tail = utils.path_tail(self.path, base)
4600- tree = self.network_root[:-1] + tail
4601- self.set_install_tree(distro, tree)
4602-
4603- def get_rootdir(self):
4604- return self.mirror
4605-
4606- def get_pkgdir(self):
4607- if not self.pkgdir:
4608- return None
4609- return os.path.join(self.get_rootdir(),self.pkgdir)
4610-
4611- def set_install_tree(self, distro, url):
4612- distro.ks_meta["tree"] = url
4613-
4614- def learn_arch_from_tree(self):
4615- """
4616- If a distribution is imported from DVD, there is a good chance the path doesn't
4617- contain the arch and we should add it back in so that it's part of the
4618- meaningful name ... so this code helps figure out the arch name. This is important
4619- for producing predictable distro names (and profile names) from differing import sources
4620- """
4621- result = {}
4622- # FIXME : this is called only once, should not be a walk
4623- if self.get_pkgdir():
4624- os.path.walk(self.get_pkgdir(), self.arch_walker, result)
4625- if result.pop("amd64",False):
4626- result["x86_64"] = 1
4627- if result.pop("i686",False):
4628- result["i386"] = 1
4629- return result.keys()
4630-
4631- def match_kernelarch_file(self, filename):
4632- """
4633- Is the given filename a kernel filename?
4634- """
4635- if not filename.endswith("deb"):
4636- return False
4637- if filename.startswith("linux-headers-"):
4638- return True
4639- return False
4640-
4641- def scan_pkg_filename(self, file):
4642- """
4643- Determine what the distro is based on the release package filename.
4644- """
4645- # FIXME: all of these dist_names should probably be put in a function
4646- # which would be called in place of looking in codes.py. Right now
4647- # you have to update both codes.py and this to add a new release
4648- if self.breed == "debian":
4649- dist_names = ['etch','lenny',]
4650- elif self.breed == "ubuntu":
4651- dist_names = ['dapper','hardy','intrepid','jaunty','karmic','lynx','maverick','natty',]
4652- else:
4653- return None
4654-
4655- if os.path.basename(file) in dist_names:
4656- release_file = os.path.join(file,'Release')
4657- self.logger.info("Found %s release file: %s" % (self.breed,release_file))
4658-
4659- f = open(release_file,'r')
4660- lines = f.readlines()
4661- f.close()
4662-
4663- for line in lines:
4664- if line.lower().startswith('version: '):
4665- version = line.split(':')[1].strip()
4666- values = version.split('.')
4667- if len(values) == 1:
4668- # I don't think you'd ever hit this currently with debian or ubuntu,
4669- # just including it for safety reasons
4670- return (os.path.basename(file), values[0], "0", "0")
4671- elif len(values) == 2:
4672- return (os.path.basename(file), values[0], values[1], "0")
4673- elif len(values) > 2:
4674- return (os.path.basename(file), values[0], values[1], values[2])
4675- return None
4676-
4677- def get_datestamp(self):
4678- """
4679- Not used for debian/ubuntu... should probably be removed? - jcammarata
4680- """
4681- pass
4682-
4683- def set_variance(self, flavor, major, minor, arch):
4684- """
4685- Set distro specific versioning.
4686- """
4687- # I don't think this is required anymore, as the scan_pkg_filename() function
4688- # above does everything we need it to - jcammarata
4689- #
4690- #if self.breed == "debian":
4691- # dist_names = { '4.0' : "etch" , '5.0' : "lenny" }
4692- # dist_vers = "%s.%s" % ( major , minor )
4693- # os_version = dist_names[dist_vers]
4694- #
4695- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
4696- #elif self.breed == "ubuntu":
4697- # # Release names taken from wikipedia
4698- # dist_names = { '6.4' :"dapper",
4699- # '8.4' :"hardy",
4700- # '8.10' :"intrepid",
4701- # '9.4' :"jaunty",
4702- # '9.10' :"karmic",
4703- # '10.4' :"lynx",
4704- # '10.10':"maverick",
4705- # '11.4' :"natty",
4706- # }
4707- # dist_vers = "%s.%s" % ( major , minor )
4708- # if not dist_names.has_key( dist_vers ):
4709- # dist_names['4ubuntu2.0'] = "IntrepidIbex"
4710- # os_version = dist_names[dist_vers]
4711- #
4712- # return os_version , "/var/lib/cobbler/kickstarts/sample.seed"
4713- #else:
4714- # return None
4715- pass
4716-
4717- def process_repos(self, main_importer, distro):
4718- # Create a disabled repository for the new distro, and the security updates
4719- #
4720- # NOTE : We cannot use ks_meta or os_version because they get fixed at a later stage
4721-
4722- repo = item_repo.Repo(main_importer.config)
4723- repo.set_breed( "apt" )
4724- repo.set_arch( distro.arch )
4725- repo.set_keep_updated( False )
4726- repo.yumopts["--ignore-release-gpg"] = None
4727- repo.yumopts["--verbose"] = None
4728- repo.set_name( distro.name )
4729- repo.set_os_version( distro.os_version )
4730- # NOTE : The location of the mirror should be derived from the timezone
4731- repo.set_mirror( "http://ftp.%s.debian.org/debian/dists/%s" % ( 'us' , '@@suite@@' ) )
4732-
4733- security_repo = item_repo.Repo(main_importer.config)
4734- security_repo.set_breed( "apt" )
4735- security_repo.set_arch( distro.arch )
4736- security_repo.set_keep_updated( False )
4737- security_repo.yumopts["--ignore-release-gpg"] = None
4738- security_repo.yumopts["--verbose"] = None
4739- security_repo.set_name( distro.name + "-security" )
4740- security_repo.set_os_version( distro.os_version )
4741- # There are no official mirrors for security updates
4742- security_repo.set_mirror( "http://security.debian.org/debian-security/dists/%s/updates" % '@@suite@@' )
4743-
4744- self.logger.info("Added repos for %s" % distro.name)
4745- repos = main_importer.config.repos()
4746- repos.add(repo,save=True)
4747- repos.add(security_repo,save=True)
4748-
4749-# ==========================================================================
4750-
4751-def get_import_manager(config,logger):
4752- return ImportDebianUbuntuManager(config,logger)
4753
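For readers skimming the removed importer above: its core step walks the mirror tree and pairs each netboot kernel with a matching initrd before creating distro and profile objects. A minimal, self-contained sketch of that pairing logic — assuming Python 3 and os.walk, since the os.path.walk call used above only exists on Python 2; find_kernel_initrd_pairs and the ks_mirror path are illustrative names, not cobbler API — might look like:

    import os

    def find_kernel_initrd_pairs(top):
        # Walk the tree and yield (dirname, kernel, initrd) for every directory
        # that holds both a netboot kernel and an initrd, mirroring distro_adder's
        # matching rules (names starting with "initrd.gz" vs. "linux" without
        # "initrd" in them).
        for dirname, _dirs, fnames in os.walk(top):
            kernel = initrd = None
            for name in fnames:
                if name.startswith("initrd.gz"):
                    initrd = os.path.join(dirname, name)
                elif name.startswith("linux") and "initrd" not in name:
                    kernel = os.path.join(dirname, name)
            if kernel and initrd:
                yield dirname, kernel, initrd

    if __name__ == "__main__":
        for dirname, kernel, initrd in find_kernel_initrd_pairs("/var/www/cobbler/ks_mirror"):
            print("%s: kernel=%s initrd=%s" % (dirname, kernel, initrd))
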
4754=== removed directory '.pc/43_fix_reposync_env_variable.patch'
4755=== removed directory '.pc/43_fix_reposync_env_variable.patch/cobbler'
4756=== removed file '.pc/43_fix_reposync_env_variable.patch/cobbler/action_reposync.py'
4757--- .pc/43_fix_reposync_env_variable.patch/cobbler/action_reposync.py 2011-06-08 17:21:45 +0000
4758+++ .pc/43_fix_reposync_env_variable.patch/cobbler/action_reposync.py 1970-01-01 00:00:00 +0000
4759@@ -1,572 +0,0 @@
4760-"""
4761-Builds out and synchronizes yum repo mirrors.
4762-Initial support for rsync, perhaps reposync coming later.
4763-
4764-Copyright 2006-2007, Red Hat, Inc
4765-Michael DeHaan <mdehaan@redhat.com>
4766-
4767-This program is free software; you can redistribute it and/or modify
4768-it under the terms of the GNU General Public License as published by
4769-the Free Software Foundation; either version 2 of the License, or
4770-(at your option) any later version.
4771-
4772-This program is distributed in the hope that it will be useful,
4773-but WITHOUT ANY WARRANTY; without even the implied warranty of
4774-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4775-GNU General Public License for more details.
4776-
4777-You should have received a copy of the GNU General Public License
4778-along with this program; if not, write to the Free Software
4779-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
4780-02110-1301 USA
4781-"""
4782-
4783-import os
4784-import os.path
4785-import time
4786-import yaml # Howell-Clark version
4787-import sys
4788-HAS_YUM = True
4789-try:
4790- import yum
4791-except:
4792- HAS_YUM = False
4793-
4794-import utils
4795-from cexceptions import *
4796-import traceback
4797-import errno
4798-from utils import _
4799-import clogger
4800-
4801-class RepoSync:
4802- """
4803- Builds out and synchronizes yum repo mirrors on the local filesystem.
4804- """
4805-
4806- # ==================================================================================
4807-
4808- def __init__(self,config,tries=1,nofail=False,logger=None):
4809- """
4810- Constructor
4811- """
4812- self.verbose = True
4813- self.api = config.api
4814- self.config = config
4815- self.distros = config.distros()
4816- self.profiles = config.profiles()
4817- self.systems = config.systems()
4818- self.settings = config.settings()
4819- self.repos = config.repos()
4820- self.rflags = self.settings.reposync_flags
4821- self.tries = tries
4822- self.nofail = nofail
4823- self.logger = logger
4824-
4825- if logger is None:
4826- self.logger = clogger.Logger()
4827-
4828- self.logger.info("hello, reposync")
4829-
4830-
4831- # ===================================================================
4832-
4833- def run(self, name=None, verbose=True):
4834- """
4835- Syncs the current repo configuration file with the filesystem.
4836- """
4837-
4838- self.logger.info("run, reposync, run!")
4839-
4840- try:
4841- self.tries = int(self.tries)
4842- except:
4843- utils.die(self.logger,"retry value must be an integer")
4844-
4845- self.verbose = verbose
4846-
4847- report_failure = False
4848- for repo in self.repos:
4849-
4850- env = repo.environment
4851-
4852- for k in env.keys():
4853- self.logger.info("environment: %s=%s" % (k,env[k]))
4854- if env[k] is not None:
4855- os.putenv(k,env[k])
4856-
4857- if name is not None and repo.name != name:
4858- # invoked to sync only a specific repo, this is not the one
4859- continue
4860- elif name is None and not repo.keep_updated:
4861- # invoked to run against all repos, but this one is off
4862- self.logger.info("%s is set to not be updated" % repo.name)
4863- continue
4864-
4865- repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
4866- repo_path = os.path.join(repo_mirror, repo.name)
4867- mirror = repo.mirror
4868-
4869- if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
4870- os.makedirs(repo_path)
4871-
4872- # which may actually NOT reposync if the repo is set to not mirror locally
4873- # but that's a technicality
4874-
4875- for x in range(self.tries+1,1,-1):
4876- success = False
4877- try:
4878- self.sync(repo)
4879- success = True
4880- except:
4881- utils.log_exc(self.logger)
4882- self.logger.warning("reposync failed, tries left: %s" % (x-2))
4883-
4884- if not success:
4885- report_failure = True
4886- if not self.nofail:
4887- utils.die(self.logger,"reposync failed, retry limit reached, aborting")
4888- else:
4889- self.logger.error("reposync failed, retry limit reached, skipping")
4890-
4891- self.update_permissions(repo_path)
4892-
4893- if report_failure:
4894- utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
4895-
4896- return True
4897-
4898- # ==================================================================================
4899-
4900- def sync(self, repo):
4901-
4902- """
4903- Conditionally sync a repo, based on type.
4904- """
4905-
4906- if repo.breed == "rhn":
4907- return self.rhn_sync(repo)
4908- elif repo.breed == "yum":
4909- return self.yum_sync(repo)
4910- elif repo.breed == "apt":
4911- return self.apt_sync(repo)
4912- elif repo.breed == "rsync":
4913- return self.rsync_sync(repo)
4914- else:
4915- utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
4916-
4917- # ====================================================================================
4918-
4919- def createrepo_walker(self, repo, dirname, fnames):
4920- """
4921- Used to run createrepo on a copied Yum mirror.
4922- """
4923- if os.path.exists(dirname) or repo['breed'] == 'rsync':
4924- utils.remove_yum_olddata(dirname)
4925-
4926- # add any repo metadata we can use
4927- mdoptions = []
4928- if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
4929- if not HAS_YUM:
4930- utils.die(self.logger,"yum is required to use this feature")
4931-
4932- rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
4933- if rmd.repoData.has_key("group"):
4934- groupmdfile = rmd.getData("group").location[1]
4935- mdoptions.append("-g %s" % groupmdfile)
4936- if rmd.repoData.has_key("prestodelta"):
4937- # need createrepo >= 0.9.7 to add deltas
4938- if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
4939- cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
4940- createrepo_ver = utils.subprocess_get(self.logger, cmd)
4941- if createrepo_ver >= "0.9.7":
4942- mdoptions.append("--deltas")
4943- else:
4944- self.logger.error("this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then resync the repo through cobbler.")
4945-
4946- blended = utils.blender(self.api, False, repo)
4947- flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
4948- try:
4949- # BOOKMARK
4950- cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
4951- utils.subprocess_call(self.logger, cmd)
4952- except:
4953- utils.log_exc(self.logger)
4954- self.logger.error("createrepo failed.")
4955- del fnames[:] # we're in the right place
4956-
4957- # ====================================================================================
4958-
4959- def rsync_sync(self, repo):
4960-
4961- """
4962- Handle copying of rsync:// and rsync-over-ssh repos.
4963- """
4964-
4965- repo_mirror = repo.mirror
4966-
4967- if not repo.mirror_locally:
4968- utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
4969-
4970- if repo.rpm_list != "" and repo.rpm_list != []:
4971- self.logger.warning("--rpm-list is not supported for rsync'd repositories")
4972-
4973- # FIXME: don't hardcode
4974- dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
4975-
4976- spacer = ""
4977- if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
4978- spacer = "-e ssh"
4979- if not repo.mirror.endswith("/"):
4980- repo.mirror = "%s/" % repo.mirror
4981-
4982- # FIXME: wrapper for subprocess that logs to logger
4983- cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
4984- rc = utils.subprocess_call(self.logger, cmd)
4985-
4986- if rc !=0:
4987- utils.die(self.logger,"cobbler reposync failed")
4988- os.path.walk(dest_path, self.createrepo_walker, repo)
4989- self.create_local_file(dest_path, repo)
4990-
4991- # ====================================================================================
4992-
4993- def rhn_sync(self, repo):
4994-
4995- """
4996- Handle mirroring of RHN repos.
4997- """
4998-
4999- repo_mirror = repo.mirror
5000-
The diff has been truncated for viewing.
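The RepoSync.run loop shown above retries each repository up to the configured number of tries and either aborts or only logs, depending on the no-fail setting. A reduced, self-contained sketch of that retry policy — sync_once, tries, nofail, and the RuntimeError are illustrative stand-ins, not cobbler's API — could read:

    import logging

    def sync_with_retries(sync_once, tries=1, nofail=False, logger=None):
        # Attempt the sync up to `tries` times; log each failure with the number
        # of attempts left, then abort or skip depending on `nofail`.
        logger = logger or logging.getLogger("reposync")
        success = False
        for attempts_left in range(tries, 0, -1):
            try:
                sync_once()
                success = True
                break
            except Exception:
                logger.warning("reposync failed, tries left: %s", attempts_left - 1)
        if not success and not nofail:
            raise RuntimeError("reposync failed, retry limit reached, aborting")
        return success
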
