Dan Watkins has proposed merging ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial.
Requested reviews:
  cloud-init commiters (cloud-init-dev)
Related bugs:
  Bug #1639263 in cloud-init: " cloud-init Unknown network_data link type: macvtap"
  https://bugs.launchpad.net/cloud-init/+bug/1639263
  Bug #1818032 in cloud-init: "sysconfig renders BOOTPROTO=dhcp even if dhcp=false in v2 network-config"
  https://bugs.launchpad.net/cloud-init/+bug/1818032
  Bug #1818571 in cloud-init: "cloud-init clean removes seed directory even when --seed is not specified"
  https://bugs.launchpad.net/cloud-init/+bug/1818571

For more details, see:
https://code.launchpad.net/~daniel-thewatkins/cloud-init/+git/cloud-init/+merge/364127

--
Your team cloud-init commiters is requested to review the proposed merge of ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/xenial into cloud-init:ubuntu/xenial.
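To illustrate the v2 network-config fix for bug #1818032 in the diff below (cloudinit/net/network_state.py), the old code appended a dhcp subnet whenever the dhcp4/dhcp6 key was merely present, so `dhcp4: false` still rendered BOOTPROTO=dhcp. A minimal standalone sketch of the behavioural difference, not cloud-init's actual renderer code:

    # Minimal sketch of the check changed in network_state.py; 'cfg' is a
    # stand-in for a netplan-style v2 ethernet entry, not cloud-init's object.
    cfg = {'dhcp4': False, 'dhcp6': False}

    old_subnets = []
    if 'dhcp4' in cfg:        # old: key presence alone triggers a dhcp subnet
        old_subnets.append({'type': 'dhcp4'})

    new_subnets = []
    if cfg.get('dhcp4'):      # new: the value must actually be truthy
        new_subnets.append({'type': 'dhcp4'})

    print(old_subnets)  # [{'type': 'dhcp4'}] -> sysconfig rendered BOOTPROTO=dhcp
    print(new_subnets)  # []                  -> dhcp correctly left disabled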
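The netplan change relies on the new NoAliasSafeDumper added to cloudinit/safeyaml.py: PyYAML emits &id001/*id001 anchors and aliases when the same object is dumped more than once, and netplan does not accept them. A small sketch of the mechanism, assuming only PyYAML and invented example data:

    import yaml


    class NoAliasSafeDumper(yaml.dumper.SafeDumper):
        """Dump YAML without emitting anchors/aliases for repeated objects."""

        def ignore_aliases(self, data):
            return True


    shared = {'dhcp4': True}                # same dict object referenced twice
    doc = {'eth0': shared, 'eth1': shared}

    print(yaml.safe_dump(doc, default_flow_style=False))
    # eth0: &id001 ...  /  eth1: *id001   <- aliases netplan cannot parse
    print(yaml.dump(doc, Dumper=NoAliasSafeDumper, default_flow_style=False))
    # each interface is written out in full, with no anchors or aliases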
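Similarly, the distros change that falls back from `passwd -l` to `usermod --lock` amounts to picking the first locking tool whose binary is on PATH. A rough standalone equivalent, using shutil.which in place of cloud-init's util.which and omitting the actual subprocess call:

    import shutil


    def pick_lock_cmd(name):
        """Return the first available command that can lock 'name's password."""
        # passwd must use the short '-l' because SLES 11 lacks the long '--lock'.
        lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
        try:
            return next(cmd for cmd in lock_tools if shutil.which(cmd[0]))
        except StopIteration:
            raise RuntimeError(
                "Unable to lock user account '%s'. No tools available. "
                "Tried: %s." % (name, [c[0] for c in lock_tools]))


    print(pick_lock_cmd('bob'))   # e.g. ['passwd', '-l', 'bob'] on most systems

The full diff of the proposed upstream snapshot follows.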
diff --git a/.gitignore b/.gitignore index 75565ed..80c509e 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ prime stage *.snap *.cover +.idea/ diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index de22f7f..30e49de 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -5,12 +5,13 @@ """Define 'clean' utility and handler as part of cloud-init commandline.""" import argparse +import glob import os import sys from cloudinit.stages import Init from cloudinit.util import ( - ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, + ProcessExecutionError, del_dir, del_file, get_config_logfiles, is_link, subp) @@ -61,18 +62,18 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - with chdir(init.paths.cloud_dir): - for path in os.listdir('.'): - if path == 'seed' and not remove_seed: - continue - try: - if os.path.isdir(path) and not is_link(path): - del_dir(path) - else: - del_file(path) - except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) - return 1 + seed_path = os.path.join(init.paths.cloud_dir, 'seed') + for path in glob.glob('%s/*' % init.paths.cloud_dir): + if path == seed_path and not remove_seed: + continue + try: + if os.path.isdir(path) and not is_link(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 return 0 diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 5a3ec3b..f092ab3 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -22,7 +22,8 @@ class TestClean(CiTestCase): class FakeInit(object): cfg = {'def_log_file': self.log1, 'output': {'all': '|tee -a {0}'.format(self.log2)}} - paths = mypaths(cloud_dir=self.artifact_dir) + # Ensure cloud_dir has a trailing slash, to match real behaviour + paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir)) def __init__(self, ds_deps): pass @@ -136,7 +137,8 @@ class TestClean(CiTestCase): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + m_stderr.getvalue()) def test_handle_clean_args_reboots(self): """handle_clean_args_reboots when reboot arg is provided.""" diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index cdf28cd..459332a 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -49,7 +49,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args): elif apt_pipe_value_s in [str(b) for b in range(0, 6)]: write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: - log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) + log.warn("Invalid option for apt_pipelining: %s", apt_pipe_value) def write_apt_snippet(setting, log, f_name): diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 46abedd..a624030 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -51,6 +51,7 
@@ file). chef: client_key: + encrypted_data_bag_secret: environment: file_backup_path: file_cache_path: @@ -114,6 +115,7 @@ CHEF_RB_TPL_DEFAULTS = { 'file_backup_path': "/var/backups/chef", 'pid_file': "/var/run/chef/client.pid", 'show_time': True, + 'encrypted_data_bag_secret': None, } CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) CHEF_RB_TPL_PATH_KEYS = frozenset([ @@ -124,6 +126,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([ 'json_attribs', 'file_cache_path', 'pid_file', + 'encrypted_data_bag_secret', ]) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 27d2366..22b1753 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -203,7 +203,7 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P<proto>[@]{0,2})' - r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' + r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' r'([:](?P<port>[0-9]+))?$') diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 080a6d0..807c3ee 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -367,7 +367,7 @@ def handle_schema_args(name, args): if not args.annotate: error(str(e)) except RuntimeError as e: - error(str(e)) + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py new file mode 100644 index 0000000..2a6bb10 --- /dev/null +++ b/cloudinit/config/tests/test_apt_pipelining.py @@ -0,0 +1,28 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests cc_apt_pipelining handler""" + +import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining + +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestAptPipelining(CiTestCase): + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_not_disabled_by_default(self, m_write_file): + """ensure that default behaviour is to not disable pipelining""" + cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None) + self.assertEqual(0, m_write_file.call_count) + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_false_disables_pipelining(self, m_write_file): + """ensure that pipelining can be disabled with correct config""" + cc_apt_pipelining.handle( + 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None) + self.assertEqual(1, m_write_file.call_count) + args, _ = m_write_file.call_args + self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0]) + self.assertIn('Pipeline-Depth "0"', args[1]) + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ef618c2..20c994d 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -577,11 +577,16 @@ class Distro(object): """ Lock the password of a user, i.e., disable password logins """ + # passwd must use short '-l' due to SLES11 lacking long form '--lock' + lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: - # Need to use the short option name '-l' instead of '--lock' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. 
- util.subp(['passwd', '-l', name]) + cmd = next(l for l in lock_tools if util.which(l[0])) + except StopIteration: + raise RuntimeError(( + "Unable to lock user account '%s'. No tools available. " + " Tried: %s.") % (name, [c[0] for c in lock_tools])) + try: + util.subp(cmd) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 83fb072..003cad6 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -89,7 +89,7 @@ def _has_suitable_upstart(): util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) return True except util.ProcessExecutionError as e: - if e.exit_code is 1: + if e.exit_code == 1: pass else: util.logexc(LOG, "dpkg --compare-versions failed [%s]", diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 21517fd..e54a34e 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -361,7 +361,8 @@ class Renderer(renderer.Renderer): if section: dump = util.yaml_dumps({name: section}, explicit_start=False, - explicit_end=False) + explicit_end=False, + noalias=True) txt = util.indent(dump, ' ' * 4) return [txt] return [] diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index f76e508..539b76d 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -706,9 +706,9 @@ class NetworkStateInterpreter(object): """Common ipconfig extraction from v2 to v1 subnets array.""" subnets = [] - if 'dhcp4' in cfg: + if cfg.get('dhcp4'): subnets.append({'type': 'dhcp4'}) - if 'dhcp6' in cfg: + if cfg.get('dhcp6'): self.use_ipv6 = True subnets.append({'type': 'dhcp6'}) diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 79e8842..5139024 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -117,6 +117,7 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual('eth9', call[0][1]) self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9ff929c..e91cd26 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -141,6 +141,9 @@ def _netdev_info_ifconfig(ifconfig_data): res = re.match(r'.*<(\S+)>', toks[i + 1]) if res: devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + return devs @@ -389,8 +392,8 @@ def netdev_pformat(): addr.get('scope', empty), data["hwaddr"])) for addr in data.get('ipv6'): tbl.add_row( - (dev, data["up"], addr["ip"], empty, addr["scope6"], - data["hwaddr"])) + (dev, data["up"], addr["ip"], empty, + addr.get("scope6", empty), data["hwaddr"])) if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: tbl.add_row((dev, data["up"], empty, empty, empty, data["hwaddr"])) diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 7bcf9dd..3bd5e03 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -17,6 +17,13 @@ _CustomSafeLoader.add_constructor( _CustomSafeLoader.construct_python_unicode) +class NoAliasSafeDumper(yaml.dumper.SafeDumper): + """A class which avoids constructing anchors/aliases on yaml dump""" + + def ignore_aliases(self, data): + return True + + def load(blob): return(yaml.load(blob, 
Loader=_CustomSafeLoader)) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a4f998b..eccbee5 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data @@ -909,13 +912,15 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': 'where/to/go'}] + # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': '/where/to/go'}] # # <SSH><PublicKeys> - # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path> + # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path> # ... # </PublicKeys></SSH> + # Under some circumstances, there may be a <Value> element along with the + # Fingerprint and Path. Pass those along if they appear. results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 9ccf2cd..4f2f6cc 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -19,6 +19,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -107,6 +108,19 @@ class DataSourceEc2(sources.DataSource): 'dynamic', {}).get('instance-identity', {}).get('document', {}) return True + def is_classic_instance(self): + """Report if this instance type is Ec2 Classic (non-vpc).""" + if not self.metadata: + # Can return False on inconclusive as we are also called in + # network_config where metadata will be present. + # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -320,6 +334,13 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + # RELEASE_BLOCKER: Xenial debian/postinst needs to add + # EventType.BOOT on upgrade path for classic. + + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change. 
+ if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result @@ -442,7 +463,7 @@ def identify_aws(data): if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return CloudNames.AWS + return CloudNames.AWS return None diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 3a3fcdf..70e7a5c 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -15,6 +15,8 @@ import os import re import time +import six + from cloudinit import log as logging from cloudinit import sources from cloudinit import util @@ -434,7 +436,7 @@ def maybe_cdrom_device(devname): """ if not devname: return False - elif not isinstance(devname, util.string_types): + elif not isinstance(devname, six.string_types): raise ValueError("Unexpected input for devname: %s" % devname) # resolve '..' and multi '/' elements diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1..2829dd2 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. 
+ """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. 
+ """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29cea..8f06911 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 'bridge', @@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None): subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8a06412..da7d349 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -548,11 +548,11 @@ class Init(object): with events.ReportEventStack("consume-user-data", "reading and applying user-data", parent=self.reporter): - self._consume_userdata(frequency) + self._consume_userdata(frequency) with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): - self._consume_vendordata(frequency) + self._consume_vendordata(frequency) # Perform post-consumption adjustments so that # modules that run during the init stage reflect diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 2eb7b0c..f41180f 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -41,26 +41,6 @@ _real_subp = util.subp SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf -# Used for detecting different python versions -PY2 = False -PY26 = False -PY27 = False -PY3 = False - -_PY_VER = sys.version_info -_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] -if (_PY_MAJOR, _PY_MINOR) <= (2, 6): - if 
(_PY_MAJOR, _PY_MINOR) == (2, 6): - PY26 = True - if (_PY_MAJOR, _PY_MINOR) >= (2, 0): - PY2 = True -else: - if (_PY_MAJOR, _PY_MINOR) == (2, 7): - PY27 = True - PY2 = True - if (_PY_MAJOR, _PY_MINOR) >= (3, 0): - PY3 = True - # Makes the old path start # with new base instead of whatever @@ -207,6 +187,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + logging.getLogger().level = None util.subp = _real_subp super(CiTestCase, self).tearDown() @@ -356,7 +337,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if PY3 else '__builtin__.open' + name = 'builtins.open' if six.PY3 else '__builtin__.open' self.patched_funcs.enter_context(mock.patch(name, trap_func)) def patchStdoutAndStderr(self, stdout=None, stderr=None): diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index d76e768..1c8a791 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -11,6 +11,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") @@ -18,6 +19,7 @@ SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") class TestNetInfo(CiTestCase): @@ -45,6 +47,18 @@ class TestNetInfo(CiTestCase): @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') def test_netdev_iproute_pformat(self, m_subp, m_which): """netdev_pformat properly rendering ip route info.""" m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 396d69a..0af0d9e 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -521,7 +521,7 @@ class OauthUrlHelper(object): if extra_exception_cb: ret = extra_exception_cb(msg, exception) finally: - self.exception_cb(msg, exception) + self.exception_cb(msg, exception) return ret def _headers_cb(self, extra_headers_cb, url): diff --git a/cloudinit/util.py b/cloudinit/util.py index a8a232b..a192091 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,11 +51,6 @@ from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) -try: - string_types = (basestring,) -except NameError: - string_types = (str,) - 
_DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) @@ -77,7 +72,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], PROC_CMDLINE = None _LSB_RELEASE = {} -PY26 = sys.version_info[0:2] == (2, 6) def get_architecture(target=None): @@ -125,7 +119,7 @@ def target_path(target, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" - elif not isinstance(target, string_types): + elif not isinstance(target, six.string_types): raise ValueError("Unexpected input for target: %s" % target) else: target = os.path.abspath(target) @@ -1596,14 +1590,17 @@ def json_dumps(data): separators=(',', ': '), default=json_serialize_default) -def yaml_dumps(obj, explicit_start=True, explicit_end=True): +def yaml_dumps(obj, explicit_start=True, explicit_end=True, noalias=False): """Return data in nicely formatted yaml.""" - return yaml.safe_dump(obj, - line_break="\n", - indent=4, - explicit_start=explicit_start, - explicit_end=explicit_end, - default_flow_style=False) + + return yaml.dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + Dumper=(safeyaml.NoAliasSafeDumper + if noalias else yaml.dumper.Dumper)) def ensure_dir(path, mode=None): @@ -2817,9 +2814,6 @@ def load_shell_content(content, add_empty=False, empty_val=None): variables. Set their value to empty_val.""" def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") return shlex.split(blob, comments=True) data = {} diff --git a/debian/changelog b/debian/changelog index a62dba2..61b5691 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,39 @@ +cloud-init (18.5-44-g7c07af28-0ubuntu1~16.04.1) xenial; urgency=medium + + * New upstream snapshot. (LP: #1819067) + - Support locking user with usermod if passwd is not available. + [Scott Moser] + - Example for Microsoft Azure data disk added. [Anton Olifir] + - clean: correctly determine the path for excluding seed directory + - helpers/openstack: Treat unknown link types as physical + - drop Python 2.6 support and our NIH version detection + - tip-pylint: Fix assignment-from-return-none errors + - net: append type:dhcp[46] only if dhcp[46] is True in v2 netconfig + [Kurt Stieger] + - cc_apt_pipelining: stop disabling pipelining by default + - tests: fix some slow tests and some leaking state + - util: don't determine string_types ourselves + - cc_rsyslog: Escape possible nested set + - Enable encrypted_data_bag_secret support for Chef [Eric Williams] + - azure: Filter list of ssh keys pulled from fabric [Jason Zions (MSFT)] + - doc: update merging doc with fixes and some additional details/examples + - tests: integration test failure summary to use traceback if empty error + - This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 + [Vitaly Kuznetsov] + - EC2: Rewrite network config on AWS Classic instances every boot + [Guilherme G. 
Piccoli] + - netinfo: Adjust ifconfig output parsing for FreeBSD ipv6 entries + - netplan: Don't render yaml aliases when dumping netplan + - add PyCharm IDE .idea/ path to .gitignore [Dominic Schlegel] + - correct grammar issue in instance metadata documentation + [Dominic Schlegel] + - clean: cloud-init clean should not trace when run from within cloud_dir + - Resolve flake8 comparison and pycodestyle over-ident issues + [Paride Legovini] + * Change Maintainer to Ubuntu Developers + + -- Daniel Watkins <oddbl...@ubuntu.com> Thu, 07 Mar 2019 16:34:05 -0500 + cloud-init (18.5-21-g8ee294d5-0ubuntu1~16.04.1) xenial; urgency=medium * New upstream snapshot. (LP: #1813346) diff --git a/debian/control b/debian/control index 1de4f2f..17536e0 100644 --- a/debian/control +++ b/debian/control @@ -1,7 +1,7 @@ Source: cloud-init Section: admin Priority: extra -Maintainer: Scott Moser <smo...@ubuntu.com> +Maintainer: Ubuntu Developers <ubuntu-devel-disc...@lists.ubuntu.com> Build-Depends: debhelper (>= 9), dh-python, dh-systemd, diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index defc5a5..2320e01 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -98,6 +98,9 @@ chef: # to the install script omnibus_version: "12.3.0" + # If encrypted data bags are used, the client needs to have a secrets file + # configured to decrypt them + encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret" # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt index 43a62a2..89d9ff5 100644 --- a/doc/examples/cloud-config-disk-setup.txt +++ b/doc/examples/cloud-config-disk-setup.txt @@ -17,7 +17,7 @@ fs_setup: device: ephemeral0 partition: auto -# Default disk definitions for Windows Azure +# Default disk definitions for Microsoft Azure # ------------------------------------------ device_aliases: {'ephemeral0': '/dev/sdb'} @@ -34,6 +34,21 @@ fs_setup: replace_fs: ntfs +# Data disks definitions for Microsoft Azure +# ------------------------------------------ + +disk_setup: + /dev/disk/azure/scsi1/lun0: + table_type: gpt + layout: True + overwrite: True + +fs_setup: + - device: /dev/disk/azure/scsi1/lun0 + partition: 1 + filesystem: ext4 + + # Default disk definitions for SmartOS # ------------------------------------ @@ -242,7 +257,7 @@ fs_setup: # # "false": If an existing file system exists, skip the creation. # -# <REPLACE_FS>: This is a special directive, used for Windows Azure that +# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that # instructs cloud-init to replace a file system of <FS_TYPE>. NOTE: # unless you define a label, this requires the use of the 'any' partition # directive. diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 64c325d..76beca9 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -90,4 +90,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are 2 types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. 
On VPC instances this file is + generated only in the first boot of the instance. + The check for the instance type is performed by is_classic_instance() + method. + .. vi: textwidth=78 diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 5d2dc94..231a008 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -4,7 +4,7 @@ Instance Metadata ***************** -What is a instance data? +What is instance data? ======================== Instance data is the collection of all configuration data that cloud-init diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst index c75ca59..5f7ca18 100644 --- a/doc/rtd/topics/merging.rst +++ b/doc/rtd/topics/merging.rst @@ -21,12 +21,12 @@ For example. .. code-block:: yaml #cloud-config (1) - run_cmd: + runcmd: - bash1 - bash2 #cloud-config (2) - run_cmd: + runcmd: - bash3 - bash4 @@ -36,7 +36,7 @@ cloud-config object that contains the following. .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash3 - bash4 @@ -45,7 +45,7 @@ Typically this is not what users want; instead they would likely prefer: .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash1 - bash2 - bash3 @@ -55,6 +55,45 @@ This way makes it easier to combine the various cloud-config objects you have into a more useful list, thus reducing duplication necessary to accomplish the same result with the previous method. + +Built-in Mergers +================ + +Cloud-init provides merging for the following built-in types: + +- Dict +- List +- String + +The ``Dict`` merger has the following options which control what is done with +values contained within the config. + +- ``allow_delete``: Existing values not present in the new value can be deleted, defaults to False +- ``no_replace``: Do not replace an existing value if one is already present, enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``List`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the list, defaults to False. +- ``prepend``: Add new values to the start of the list, defaults to False. +- ``no_replace``: Do not replace an existing value if one is already present, enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``Str`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the string, defaults to False. + +Common options for all merge types which control how recursive merging is +done on other types. + +- ``recurse_dict``: If True merge the new values of the dictionary, defaults to True. +- ``recurse_list``: If True merge the new values of the list, defaults to False. +- ``recurse_array``: Alias for ``recurse_list``. +- ``recurse_str``: If True merge the new values of the string, defaults to False. + + Customizability =============== @@ -164,8 +203,8 @@ string format (i.e. the second option above), for example: .. 
code-block:: python - {'merge_how': [{'name': 'list', 'settings': ['extend']}, - {'name': 'dict', 'settings': []}, + {'merge_how': [{'name': 'list', 'settings': ['append']}, + {'name': 'dict', 'settings': ['no_replace', 'recurse_list']}, {'name': 'str', 'settings': ['append']}]} This would be the equivalent format for default string format but in dictionary @@ -201,4 +240,43 @@ Note, however, that merge algorithms are not used *across* types of configuration. As was the case before merging was implemented, user-data will overwrite conf.d configuration without merging. +Example cloud-config +==================== + +A common request is to include multiple ``runcmd`` directives in different +files and merge all of the commands together. To achieve this, we must modify +the default merging to allow for dictionaries to join list values. + + +The first config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash1 + - bash2 + +The second config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash3 + - bash4 + + .. vi: textwidth=78 diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl index cbb6b15..99978d3 100644 --- a/templates/chef_client.rb.tmpl +++ b/templates/chef_client.rb.tmpl @@ -1,6 +1,6 @@ ## template:jinja {# -This file is only utilized if the module 'cc_chef' is enabled in +This file is only utilized if the module 'cc_chef' is enabled in cloud-config. Specifically, in order to enable it you need to add the following to config: chef: @@ -56,3 +56,6 @@ pid_file "{{pid_file}}" {% if show_time %} Chef::Log::Formatter.show_time = true {% endif %} +{% if encrypted_data_bag_secret %} +encrypted_data_bag_secret "{{encrypted_data_bag_secret}}" +{% endif %} diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 9911ecf..7018f4d 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -61,12 +61,17 @@ def format_test_failures(test_result): if not test_result['failures']: return '' failure_hdr = ' test failures:' - failure_fmt = ' * {module}.{class}.{function}\n {error}' + failure_fmt = ' * {module}.{class}.{function}\n ' output = [] for failure in test_result['failures']: if not output: output = [failure_hdr] - output.append(failure_fmt.format(**failure)) + msg = failure_fmt.format(**failure) + if failure.get('error'): + msg += failure['error'] + else: + msg += failure.get('traceback', '') + output.append(msg) return '\n'.join(output) diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 0000000..f7293c5 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 0000000..3521ea3 --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP 
+W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 +61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: <Empty Attributes> 
+subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redac...@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redac...@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw 
+Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P +AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: <Empty Attributes> +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 0000000..ce9b852 --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 0000000..54d749e --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output new file mode 100644 index 0000000..3de15a5 --- /dev/null +++ b/tests/data/netinfo/freebsd-ifconfig-output @@ -0,0 +1,17 @@ +vtnet0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 + options=6c07bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6> + ether fa:16:3e:14:1f:99 + hwaddr fa:16:3e:14:1f:99 + inet 10.1.80.61 netmask 
0xfffff000 broadcast 10.1.95.255 + nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL> + media: Ethernet 10Gbase-T <full-duplex> + status: active +pflog0: flags=0<> metric 0 mtu 33160 +pfsync0: flags=0<> metric 0 mtu 1500 + syncpeer: 0.0.0.0 maxupd: 128 defer: off +lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384 + options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6> + inet6 ::1 prefixlen 128 + inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4 + inet 127.0.0.1 netmask 0xff000000 + nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL> diff --git a/tests/data/netinfo/freebsd-netdev-formatted-output b/tests/data/netinfo/freebsd-netdev-formatted-output new file mode 100644 index 0000000..a9d2ac1 --- /dev/null +++ b/tests/data/netinfo/freebsd-netdev-formatted-output @@ -0,0 +1,11 @@ ++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++ ++---------+-------+----------------+------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++---------+-------+----------------+------------+-------+-------------------+ +| lo0 | True | 127.0.0.1 | 0xff000000 | . | . | +| lo0 | True | ::1/128 | . | . | . | +| lo0 | True | fe80::1%lo0/64 | . | 0x4 | . | +| pflog0 | False | . | . | . | . | +| pfsync0 | False | . | . | . | . | +| vtnet0 | True | 10.1.80.61 | 0xfffff000 | . | fa:16:3e:14:1f:99 | ++---------+-------+----------------+------------+-------+-------------------+ diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 417d86a..6b05b8f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,7 +11,7 @@ from cloudinit.util import (b64e, decode_binary, load_file, write_file, from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, PY26, SkipTest) + ExitStack) import crypt import httpretty @@ -221,8 +221,6 @@ class TestAzureDataSource(CiTestCase): def setUp(self): super(TestAzureDataSource, self).setUp() - if PY26: - raise SkipTest("Does not work on python 2.6") self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty @@ -1692,6 +1690,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_media_switch, m_dhcp, diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93..0255616 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index dcdabea..520c50f 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -268,8 +268,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', side_effect=exists_side_effect())) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) self.assertEqual(exists_mock.call_count, 2) @@ -296,8 +295,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', return_value=True)) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) exists_mock.assert_called_once_with(mock.ANY) @@ -331,8 +329,7 @@ class TestConfigDriveDataSource(CiTestCase): yield True with mock.patch.object(os.path, 'exists', side_effect=exists_side_effect()): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) # We don't assert the call count for os.path.exists() because # not all of the entries in name_tests results in two calls to # that function. Specifically, 'root2k' doesn't seem to call @@ -359,8 +356,7 @@ class TestConfigDriveDataSource(CiTestCase): } for name, dev_name in name_tests.items(): with mock.patch.object(os.path, 'exists', return_value=True): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): """Verify a dir is read as such.""" @@ -604,6 +600,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -730,6 +729,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d..20d59bf 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ 
-401,6 +401,30 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index c3f258d..4062495 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -240,4 +240,32 @@ class TestCreateUser(CiTestCase): [mock.call(set(['auth1']), user), # not disabled mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, + m_is_snappy): + """Lock uses usermod --lock if no 'passwd' cmd available.""" + m_which.side_effect = lambda m: m in ('usermod',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['usermod', '--lock', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_passwd_if_available(self, m_which, m_subp, + m_is_snappy): + """Lock with only passwd will use passwd.""" + m_which.side_effect = lambda m: m in ('passwd',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['passwd', '-l', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, + m_is_snappy): + """Lock with no commands available raises RuntimeError.""" + m_which.return_value = None + with self.assertRaises(RuntimeError): + self.dist.lock_passwd("bob") + # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e986b59..e453040 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -407,7 +407,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return '/etc/netplan/50-cloud-init.yaml' def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 756b4fb..d00c1b4 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -441,7 +441,7 @@ class TestDsIdentify(DsIdentifyBase): nova does not identify itself on platforms other than 
intel. https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" - data = VALID_CFG['OpenStack'].copy() + data = copy.deepcopy(VALID_CFG['OpenStack']) del data['files'][P_PRODUCT_NAME] data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE, 'policy_no_dmi': POLICY_FOUND_OR_MAYBE}) diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index b16532e..f431126 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -145,6 +145,7 @@ class TestChef(FilesystemMockingTestCase): file_backup_path "/var/backups/chef" pid_file "/var/run/chef/client.pid" Chef::Log::Formatter.show_time = true + encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ tpl_file = util.load_file('templates/chef_client.rb.tmpl') self.patchUtils(self.tmp) @@ -157,6 +158,8 @@ class TestChef(FilesystemMockingTestCase): 'validation_name': 'bob', 'validation_key': "/etc/chef/vkey.pem", 'validation_cert': "this is my cert", + 'encrypted_data_bag_secret': + '/etc/chef/encrypted_data_bag_secret' }, } cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e041e97..e3b9e02 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -19,6 +19,7 @@ import gzip import io import json import os +import re import textwrap import yaml @@ -103,6 +104,326 @@ STATIC_EXPECTED_1 = { 'address': '10.0.0.2'}], } +V1_NAMESERVER_ALIAS = """ +config: +- id: eno1 + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: eno1 + subnets: + - type: manual + type: physical +- id: eno2 + mac_address: 08:94:ef:51:ae:e1 + mtu: 1500 + name: eno2 + subnets: + - type: manual + type: physical +- id: eno3 + mac_address: 08:94:ef:51:ae:de + mtu: 1500 + name: eno3 + subnets: + - type: manual + type: physical +- bond_interfaces: + - eno1 + - eno3 + id: bondM + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: bondM + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.10.47/23 + gateway: 10.101.11.254 + type: static + type: bond +- id: eno4 + mac_address: 08:94:ef:51:ae:df + mtu: 1500 + name: eno4 + subnets: + - type: manual + type: physical +- id: enp0s20f0u1u6 + mac_address: 0a:94:ef:51:a4:b9 + mtu: 1500 + name: enp0s20f0u1u6 + subnets: + - type: manual + type: physical +- id: enp216s0f0 + mac_address: 68:05:ca:81:7c:e8 + mtu: 9000 + name: enp216s0f0 + subnets: + - type: manual + type: physical +- id: enp216s0f1 + mac_address: 68:05:ca:81:7c:e9 + mtu: 9000 + name: enp216s0f1 + subnets: + - type: manual + type: physical +- id: enp47s0f0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: enp47s0f0 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f0 + - enp47s0f0 + id: bond0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: bond0 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - type: manual + type: bond +- id: bond0.3502 + mtu: 9000 + name: bond0.3502 + subnets: + - address: 172.20.80.4/25 + type: static + type: vlan + vlan_id: 3502 + vlan_link: bond0 +- id: bond0.3503 + mtu: 9000 + name: bond0.3503 + subnets: + - address: 172.20.80.129/25 + type: static + type: vlan + vlan_id: 3503 + vlan_link: bond0 +- id: enp47s0f1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: 
enp47s0f1 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f1 + - enp47s0f1 + id: bond1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: bond1 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.8.65/26 + routes: + - destination: 213.119.192.0/24 + gateway: 10.101.8.126 + metric: 0 + type: static + type: bond +- address: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + type: nameserver +version: 1 +""" + +NETPLAN_NO_ALIAS = """ +network: + version: 2 + ethernets: + eno1: + match: + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + set-name: eno1 + eno2: + match: + macaddress: 08:94:ef:51:ae:e1 + mtu: 1500 + set-name: eno2 + eno3: + match: + macaddress: 08:94:ef:51:ae:de + mtu: 1500 + set-name: eno3 + eno4: + match: + macaddress: 08:94:ef:51:ae:df + mtu: 1500 + set-name: eno4 + enp0s20f0u1u6: + match: + macaddress: 0a:94:ef:51:a4:b9 + mtu: 1500 + set-name: enp0s20f0u1u6 + enp216s0f0: + match: + macaddress: 68:05:ca:81:7c:e8 + mtu: 9000 + set-name: enp216s0f0 + enp216s0f1: + match: + macaddress: 68:05:ca:81:7c:e9 + mtu: 9000 + set-name: enp216s0f1 + enp47s0f0: + match: + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + set-name: enp47s0f0 + enp47s0f1: + match: + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + set-name: enp47s0f1 + bonds: + bond0: + interfaces: + - enp216s0f0 + - enp47s0f0 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + bond1: + addresses: + - 10.101.8.65/26 + interfaces: + - enp216s0f1 + - enp47s0f1 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + routes: + - metric: 0 + to: 213.119.192.0/24 + via: 10.101.8.126 + bondM: + addresses: + - 10.101.10.47/23 + gateway4: 10.101.11.254 + interfaces: + - eno1 + - eno3 + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + vlans: + bond0.3502: + addresses: + - 172.20.80.4/25 + id: 3502 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + bond0.3503: + addresses: + - 172.20.80.129/25 + id: 3503 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas +""" + +NETPLAN_DHCP_FALSE = """ +version: 2 +ethernets: + ens3: + match: + macaddress: 52:54:00:ab:cd:ef + dhcp4: false + dhcp6: false + addresses: + - 192.168.42.100/24 + - 2001:db8::100/32 + gateway4: 192.168.42.1 + gateway6: 2001:db8::1 + nameservers: + search: [example.com] + addresses: [192.168.42.53, 1.1.1.1] +""" + # Examples (and expected outputs for various renderers). 
OS_SAMPLES = [ { @@ -2286,6 +2607,50 @@ USERCTL=no config = sysconfig.ConfigObj(nm_cfg) self.assertIn('ifcfg-rh', config['main']['plugins']) + def test_netplan_dhcp_false_disable_dhcp_in_state(self): + """netplan config with dhcp[46]: False should not add dhcp in state""" + net_config = yaml.load(NETPLAN_DHCP_FALSE) + ns = network_state.parse_net_config_data(net_config, + skip_broken=False) + + dhcp_found = [snet for iface in ns.iter_interfaces() + for snet in iface['subnets'] if 'dhcp' in snet['type']] + + self.assertEqual([], dhcp_found) + + def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): + """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" + + entry = { + 'yaml': NETPLAN_DHCP_FALSE, + 'expected_sysconfig': { + 'ifcfg-ens3': textwrap.dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=ens3 + DNS1=192.168.42.53 + DNS2=1.1.1.1 + DOMAIN=example.com + GATEWAY=192.168.42.1 + HWADDR=52:54:00:ab:cd:ef + IPADDR=192.168.42.100 + IPV6ADDR=2001:db8::100/32 + IPV6INIT=yes + IPV6_DEFAULTGW=2001:db8::1 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """), + } + } + + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._assert_headers(found) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -3065,6 +3430,38 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_has_yaml_no_aliases(self): + entry = { + 'yaml': V1_NAMESERVER_ALIAS, + 'expected_netplan': NETPLAN_NO_ALIAS, + } + network_config = yaml.load(entry['yaml']) + ns = network_state.parse_net_config_data(network_config) + files = self._render_and_read(state=ns) + # check for alias + content = files['/etc/netplan/50-cloud-init.yaml'] + + # test load the yaml to ensure we don't render something not loadable + # this allows single aliases, but not duplicate ones + parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml']) + self.assertNotEqual(None, parsed) + + # now look for any alias, avoid rendering them entirely + # generate the first anchor string using the template + # as of this writing, looks like "&id001" + anchor = r'&' + yaml.serializer.Serializer.ANCHOR_TEMPLATE % 1 + found_alias = re.search(anchor, content, re.MULTILINE) + if found_alias: + msg = "Error at: %s\nContent:\n%s" % (found_alias, content) + raise ValueError('Found yaml alias in rendered netplan: ' + msg) + + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): diff --git a/tools/cloud-init-per b/tools/cloud-init-per index 7d6754b..eae3e93 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=$2 +name=${2/-/_} shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -53,6 +53,12 @@ esac [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || fail "failed to make directory for ${sem}" +# Rename legacy sem files with dashes in their names. Do not overwrite existing +# sem files to prevent clobbering those which may have been created from calls +# outside of cloud-init. 
+sem_legacy="${sem/_/-}" +[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" + [ "$freq" != "always" -a -e "$sem" ] && exit 0 "$@" ret=$?