Needs fixing. There is no explicit test that ensemble status depends on the actions of the StatusManager. Instead the tests take advantage of the fact that get_status works even if the /status znode is not set in ZK, so things pass regardless. A better approach to testing might be:

1. Explicitly instantiate a StatusManager in the control tests. Verify that it properly writes to the /status znode across before and after runs of ensemble status, while the state of the system is changing (e.g. with build_topology).

2. Add some minimal testing in ensemble.agents.tests.test_provision to verify that the StatusManager is properly instantiated and running. There is nothing there now to verify that.

3. Verify in ensemble.state.tests.test_status that the proper caching behavior is seen.

Otherwise I think the tests look fine. Some additional comments:

[1]

+        self.mocker.count(min=0, max=None)
+
+        r = self.mocker.replace("ensemble.providers.common.utils.get_active_machine_provider")
+        r(ANY)
+        self.mocker.result(self.provider)
+        self.mocker.count(min=0, max=None)

I'm a bit leery of mocks that are this open: nothing is really being verified here. Maybe it's called, maybe it isn't. Is it possible to constrain them better, or to check their args?

[2]

-    This is handy for a default initialization.
+    This is handy for a default initialisation.

Are we committing to British spelling? ;) Works for me either way.

[3]

+@inlineCallbacks
+def get_active_machine_provider(client):
+    """Return the machine environment for the running environment.
+
+    This is used on the deployment side to get a machine provider
+    which can be used to pull machine specific values like DNS names.
+    """
+    env_manager = EnvironmentStateManager(client)
+    env_config = yield env_manager.get_config()
+    env = env_config.get_default()
+    machine_provider = env.get_machine_provider()
+    returnValue(machine_provider)

No tests for this code.

[4]

+# Version of the status serialisation
+VERSION = 1

Good idea. I wonder if the version should also be reported to the user instead of being dropped by client code here, so that code consuming the status info can determine whether it has backward-compatibility issues.

[5]

+class StatusManager(StateBase):
+    """Watch deployments for status state changes."""
+    def __init__(self, client):
+        super(StatusManager, self).__init__(client)
+        self._status = None
+
+    @inlineCallbacks
+    def get_status(self):
+        """Get current status information."""
+        if self._status is None:
+            status = yield self._load_status()
+            if status is None:
+                status = yield self.collect()
+
+            self._status = status
+        returnValue(self._status)

From what I can tell, the caching behavior with _load_status is only exercised indirectly (by ensemble.control.tests.test_status.StatusTest.test_collect_filtering).

[6]

+    @inlineCallbacks
+    def build_topology(self, base=None):
+        """Build a simulated topology with a default machine configuration.
+
+        This method returns a dict that can be used to get handles to
+        the constructed objects.
+        """
+        state = {}
+
+        # build out the topology using the state managers
+        m1 = yield self.machine_state_manager.add_machine_state()
+        m2 = yield self.machine_state_manager.add_machine_state()
+        m3 = yield self.machine_state_manager.add_machine_state()
+        m4 = yield self.machine_state_manager.add_machine_state()
+        m5 = yield self.machine_state_manager.add_machine_state()
+        m6 = yield self.machine_state_manager.add_machine_state()
+        m7 = yield self.machine_state_manager.add_machine_state()
+
+        # inform the provider about the machine
+        yield self.provider.start_machine({"machine-id": 0,
+                                           "dns-name": "steamcloud-1.com"})
+        yield self.provider.start_machine({"machine-id": 1,
+                                           "dns-name": "steamcloud-2.com"})
+        yield self.provider.start_machine({"machine-id": 2,
+                                           "dns-name": "steamcloud-3.com"})
+        yield self.provider.start_machine({"machine-id": 3,
+                                           "dns-name": "steamcloud-4.com"})
+        yield self.provider.start_machine({"machine-id": 4,
+                                           "dns-name": "steamcloud-5.com"})
+        yield self.provider.start_machine({"machine-id": 5,
+                                           "dns-name": "steamcloud-6.com"})
+        yield self.provider.start_machine({"machine-id": 6,
+                                           "dns-name": "steamcloud-7.com"})
+
+        yield m1.set_instance_id(0)
+        yield m2.set_instance_id(1)
+        yield m3.set_instance_id(2)
+        yield m4.set_instance_id(3)
+        yield m5.set_instance_id(4)
+        yield m6.set_instance_id(5)
+        yield m7.set_instance_id(6)
+
+        state["machines"] = [m1, m2, m3, m4, m5, m6, m7]
+
+        # "Deploy" services
+        wordpress = yield self.add_service_from_formula("wordpress")
+        mysql = yield self.add_service_from_formula("mysql")
+        yield mysql.set_exposed_flag()  # but w/ no open ports
+
+        varnish = yield self.add_service_from_formula("varnish")
+        yield varnish.set_exposed_flag()
+        # w/o additional metadata
+        memcache = yield self.add_service("memcache")
+
+        state["services"] = dict(wordpress=wordpress, mysql=mysql,
+                                 varnish=varnish, memcache=memcache)
+
+        # add unit states to services and assign to machines
+        wpu = yield wordpress.add_unit_state()
+        yield wpu.assign_to_machine(m1)
+
+        myu1 = yield mysql.add_unit_state()
+        myu2 = yield mysql.add_unit_state()
+        yield myu1.assign_to_machine(m2)
+        yield myu2.assign_to_machine(m3)
+
+        vu1 = yield varnish.add_unit_state()
+        vu2 = yield varnish.add_unit_state()
+        yield vu1.assign_to_machine(m4)
+        yield vu2.assign_to_machine(m5)
+
+        mc1 = yield memcache.add_unit_state()
+        mc2 = yield memcache.add_unit_state()
+        yield mc1.assign_to_machine(m6)
+        yield mc2.assign_to_machine(m7)
+
+        # Set the lifecycle state and open ports, if any, for each unit state.
+        yield self.set_unit_state(wpu, "started", [(80, "tcp"), (443, "tcp")])
+        yield self.set_unit_state(myu1, "started")
+        yield self.set_unit_state(myu2, "stopped")
+        yield self.set_unit_state(vu1, "started", [(80, "tcp")])
+        yield self.set_unit_state(vu2, "started", [(80, "tcp")])
+        yield self.set_unit_state(mc1, "started")
+        yield self.set_unit_state(mc2, "installed")
+
+        # Wordpress integrates with each of the following
+        # services. Each relation endpoint is used to define the
+        # specific relation to be established.
+        mysql_ep = RelationEndpoint(
+            "mysql", "client-server", "db", "server")
+        memcache_ep = RelationEndpoint(
+            "memcache", "client-server", "cache", "server")
+        varnish_ep = RelationEndpoint(
+            "varnish", "client-server", "proxy", "client")
+
+        wordpress_db_ep = RelationEndpoint(
+            "wordpress", "client-server", "db", "client")
+        wordpress_cache_ep = RelationEndpoint(
+            "wordpress", "client-server", "cache", "client")
+        wordpress_proxy_ep = RelationEndpoint(
+            "wordpress", "client-server", "proxy", "server")
+
+        # Create relation service units for each of these relations
+        yield self.add_relation_with_relation_units(
+            mysql_ep, [myu1, myu2], ["up", "departed"],
+            wordpress_db_ep, [wpu], ["up"])
+        yield self.add_relation_with_relation_units(
+            memcache_ep, [mc1, mc2], ["up", "down"],
+            wordpress_cache_ep, [wpu], ["up"])
+        yield self.add_relation_with_relation_units(
+            varnish_ep, [vu1, vu2], ["up", "up"],
+            wordpress_proxy_ep, [wpu], ["up"])
+
+        state["relations"] = dict(
+            wordpress=[wpu],
+            mysql=[myu1, myu2],
+            varnish=[vu1, vu2],
+            memcache=[mc1, mc2]
+            )
+        returnValue(state)

Maybe the definition of helpers like build_topology could be shared between the two test_status modules? Something along these lines, perhaps (see the sketch below).
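To be concrete about both that and point 1 above, here is a rough sketch, not a definitive implementation. The module paths, the StatusTestBase name, and the assumption that collect() is what serialises status into the /status znode are all mine, not the branch's:

    # Sketch of an explicit StatusManager test. StatusTestBase is the
    # hypothetical shared home (e.g. ensemble/state/tests/common.py) for
    # build_topology and friends, so ensemble.control.tests.test_status
    # and ensemble.state.tests.test_status stop duplicating them.
    from twisted.internet.defer import inlineCallbacks

    from ensemble.state.status import StatusManager  # import path assumed
    from ensemble.state.tests.common import StatusTestBase  # hypothetical


    class StatusManagerTest(StatusTestBase):
        """Verify that status output actually depends on the StatusManager."""

        @inlineCallbacks
        def test_status_tracks_state_changes(self):
            manager = StatusManager(self.client)

            # Collect before any topology exists; assuming collect()
            # writes the serialised status to the /status znode.
            before = yield manager.collect()

            # Change the state of the system...
            yield self.build_topology()

            # ...and a second collection should see the change.
            after = yield manager.collect()
            self.assertNotEqual(before, after)

            # The /status znode should now hold the serialised status,
            # so get_status on a fresh manager can use the cached copy.
            content, stat = yield self.client.get("/status")
            self.assertTrue(content)

Something like that would also give ensemble.state.tests.test_status a natural place to verify the caching behavior from [5] directly, rather than only through test_collect_filtering.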