Fix nova/compute direct use of instance module objects
This switches the nova/compute code to use nova.objects.Instance*
instead of nova.objects.instance.Instance*, so that subclassing of
objects will work properly.

Partial-Blueprint: object-subclassing

Change-Id: I941ebb14caeb29fe55bb93bccaf95fd158024d34
comstud committed May 21, 2014
1 parent 6e2032d commit 1fa07bd
Showing 7 changed files with 58 additions and 49 deletions.
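
The point of the change is the lookup path, not the classes themselves: writing "from nova.objects import instance as instance_obj" pins every call site to the concrete class defined in that submodule, while the attributes on the nova.objects package are set when objects are registered, so call sites that go through objects.Instance pick up whatever class the registry most recently published, including a subclass. Below is a minimal, runnable sketch of that pattern; the register helper and class names are illustrative stand-ins, not Nova's actual registry code.

    # Illustrative sketch only; this is not Nova's actual registry code.
    import types

    # Stand-in for the nova.objects package module.
    objects = types.ModuleType('objects')

    def register(cls):
        # Mimic Nova-style registration: publish the class on the package
        # module under its canonical object name, replacing any earlier one.
        setattr(objects, cls.obj_name(), cls)
        return cls

    @register
    class Instance(object):
        @classmethod
        def obj_name(cls):
            return 'Instance'

        def describe(self):
            return 'base Instance'

    # Code that imported the class directly stays pinned to the base class.
    direct_ref = Instance

    @register
    class SpecialInstance(Instance):
        # Inherits obj_name(), so it is registered as objects.Instance.
        def describe(self):
            return 'subclassed Instance'

    print(direct_ref().describe())        # -> base Instance
    print(objects.Instance().describe())  # -> subclassed Instance

The same motivation explains the relaxed type checks in claims.py and utils.py below: once the concrete Instance class is no longer imported, isinstance(instance, obj_base.NovaObject) is enough to tell an object apart from a plain dict, and it also accepts any Instance subclass.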
14 changes: 7 additions & 7 deletions nova/compute/api.py
@@ -49,11 +49,11 @@
 from nova.network.security_group import openstack_driver
 from nova.network.security_group import security_group_base
 from nova import notifications
+from nova import objects
 from nova.objects import aggregate as aggregate_obj
 from nova.objects import base as obj_base
 from nova.objects import block_device as block_device_obj
 from nova.objects import flavor as flavor_obj
-from nova.objects import instance as instance_obj
 from nova.objects import instance_action
 from nova.objects import instance_group as instance_group_obj
 from nova.objects import instance_info_cache
@@ -821,7 +821,7 @@ def _provision_instances(self, context, instance_type, min_count,
         instances = []
         try:
             for i in xrange(num_instances):
-                instance = instance_obj.Instance()
+                instance = objects.Instance()
                 instance.update(base_options)
                 instance = self.create_db_entry_for_new_instance(
                     context, instance_type, boot_meta, instance,
@@ -1784,10 +1784,10 @@ def get(self, context, instance_id, want_objects=False,
         # NOTE(ameade): we still need to support integer ids for ec2
         try:
             if uuidutils.is_uuid_like(instance_id):
-                instance = instance_obj.Instance.get_by_uuid(
+                instance = objects.Instance.get_by_uuid(
                     context, instance_id, expected_attrs=expected_attrs)
             elif utils.is_int_like(instance_id):
-                instance = instance_obj.Instance.get_by_id(
+                instance = objects.Instance.get_by_id(
                     context, instance_id, expected_attrs=expected_attrs)
             else:
                 raise exception.InstanceNotFound(instance_id=instance_id)
@@ -1903,7 +1903,7 @@ def _get_instances_by_filters(self, context, filters,
                   'security_groups']
         if expected_attrs:
             fields.extend(expected_attrs)
-        return instance_obj.InstanceList.get_by_filters(
+        return objects.InstanceList.get_by_filters(
             context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
             limit=limit, marker=marker, expected_attrs=fields)
 
@@ -3028,8 +3028,8 @@ def evacuate(self, context, instance, host, on_shared_storage,
         self._record_action_start(context, instance, instance_actions.EVACUATE)
 
         # NOTE(danms): Transitional until evacuate supports objects
-        inst_obj = instance_obj.Instance._from_db_object(
-            context, instance_obj.Instance(), instance,
+        inst_obj = objects.Instance._from_db_object(
+            context, objects.Instance(), instance,
             expected_attrs=['metadata', 'system_metadata', 'info_cache'])
 
         return self.compute_rpcapi.rebuild_instance(context,
4 changes: 2 additions & 2 deletions nova/compute/claims.py
@@ -18,7 +18,7 @@
 """
 
 from nova import exception
-from nova.objects import instance as instance_obj
+from nova.objects import base as obj_base
 from nova.openstack.common.gettextutils import _
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -76,7 +76,7 @@ def __init__(self, instance, tracker, resources, overhead=None,
                  limits=None):
         super(Claim, self).__init__()
         # Stash a copy of the instance at the current point of time
-        if isinstance(instance, instance_obj.Instance):
+        if isinstance(instance, obj_base.NovaObject):
             self.instance = instance.obj_clone()
         else:
             # This does not use copy.deepcopy() because it could be
72 changes: 37 additions & 35 deletions nova/compute/manager.py
@@ -61,6 +61,7 @@
 from nova import network
 from nova.network import model as network_model
 from nova.network.security_group import openstack_driver
+from nova import objects
 from nova.objects import aggregate as aggregate_obj
 from nova.objects import base as obj_base
 from nova.objects import block_device as block_device_obj
@@ -378,8 +379,8 @@ def object_compat(function):
     def decorated_function(self, context, *args, **kwargs):
         def _load_instance(instance_or_dict):
             if isinstance(instance_or_dict, dict):
-                instance = instance_obj.Instance._from_db_object(
-                    context, instance_obj.Instance(), instance_or_dict,
+                instance = objects.Instance._from_db_object(
+                    context, objects.Instance(), instance_or_dict,
                     expected_attrs=metas)
                 instance._context = context
             return instance
@@ -656,7 +657,7 @@ def _get_instances_on_driver(self, context, filters=None):
         try:
             driver_uuids = self.driver.list_instance_uuids()
             filters['uuid'] = driver_uuids
-            local_instances = instance_obj.InstanceList.get_by_filters(
+            local_instances = objects.InstanceList.get_by_filters(
                 context, filters, use_slave=True)
             return local_instances
         except NotImplementedError:
@@ -665,8 +666,8 @@
         # The driver doesn't support uuids listing, so we'll have
         # to brute force.
         driver_instances = self.driver.list_instances()
-        instances = instance_obj.InstanceList.get_by_filters(context, filters,
-                                                             use_slave=True)
+        instances = objects.InstanceList.get_by_filters(context, filters,
+                                                        use_slave=True)
         name_map = dict((instance.name, instance) for instance in instances)
         local_instances = []
         for driver_instance in driver_instances:
@@ -1004,8 +1005,8 @@ def handle_lifecycle_event(self, event):
                  {'state': event.get_transition(),
                   'uuid': event.get_instance_uuid()})
         context = nova.context.get_admin_context(read_deleted='yes')
-        instance = instance_obj.Instance.get_by_uuid(
-            context, event.get_instance_uuid())
+        instance = objects.Instance.get_by_uuid(context,
+                                                event.get_instance_uuid())
         vm_power_state = None
         if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
             vm_power_state = power_state.SHUTDOWN
@@ -1041,7 +1042,7 @@ def init_host(self):
         """Initialization for a standalone compute service."""
         self.driver.init_host(host=self.host)
         context = nova.context.get_admin_context()
-        instances = instance_obj.InstanceList.get_by_host(
+        instances = objects.InstanceList.get_by_host(
             context, self.host, expected_attrs=['info_cache'])
 
         if CONF.defer_iptables_apply:
@@ -1140,9 +1141,9 @@ def _get_instance_nw_info(self, context, instance, use_slave=False):
         # If we get an instance without it, re-fetch so that the call
         # to network_api (which requires it for instance_type) will
         # succeed.
-        instance = instance_obj.Instance.get_by_uuid(context,
-                                                     instance['uuid'],
-                                                     use_slave=use_slave)
+        instance = objects.Instance.get_by_uuid(context,
+                                                instance['uuid'],
+                                                use_slave=use_slave)
 
         network_info = self.network_api.get_instance_nw_info(context,
                                                              instance)
@@ -1490,7 +1491,7 @@ def _check_instance_build_time(self, context):
         filters = {'vm_state': vm_states.BUILDING,
                    'host': self.host}
 
-        building_insts = instance_obj.InstanceList.get_by_filters(context,
+        building_insts = objects.InstanceList.get_by_filters(context,
                            filters, expected_attrs=[], use_slave=True)
 
         for instance in building_insts:
@@ -3109,8 +3110,9 @@ def do_confirm_resize(context, instance, migration_id):
             # deleted, we do nothing and return here
             expected_attrs = ['metadata', 'system_metadata']
             try:
-                instance = instance_obj.Instance.get_by_uuid(context,
-                        instance.uuid, expected_attrs=expected_attrs)
+                instance = objects.Instance.get_by_uuid(
+                        context, instance.uuid,
+                        expected_attrs=expected_attrs)
             except exception.InstanceNotFound:
                 LOG.info(_("Instance is not found during confirmation"),
                          context=context, instance=instance)
@@ -3972,8 +3974,8 @@ def inject_network_info(self, context, instance):
     @wrap_instance_fault
     def get_console_output(self, context, instance, tail_length):
         """Send the console output for the given instance."""
-        instance = instance_obj.Instance._from_db_object(
-            context, instance_obj.Instance(), instance)
+        instance = objects.Instance._from_db_object(
+            context, objects.Instance(), instance)
         context = context.elevated()
         LOG.audit(_("Get console output"), context=context,
                   instance=instance)
@@ -4373,9 +4375,9 @@ def remove_volume_connection(self, context, volume_id, instance):
         try:
             bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                 context, volume_id)
-            inst_obj = instance_obj.Instance._from_db_object(
-                context, instance_obj.Instance(),
-                instance)
+            inst_obj = objects.Instance._from_db_object(context,
+                                                        objects.Instance(),
+                                                        instance)
             self._detach_volume(context, inst_obj, bdm)
             connector = self.driver.get_volume_connector(instance)
             self.volume_api.terminate_connection(context, volume_id, connector)
@@ -4555,11 +4557,11 @@ def live_migration(self, context, dest, instance, block_migration,
         # NOTE(danms): since instance is not the first parameter, we can't
         # use @object_compat on this method. Since this is the only example,
         # we do this manually instead of complicating the decorator
-        if not isinstance(instance, instance_obj.Instance):
+        if not isinstance(instance, obj_base.NovaObject):
             expected = ['metadata', 'system_metadata',
                         'security_groups', 'info_cache']
-            instance = instance_obj.Instance._from_db_object(
-                context, instance_obj.Instance(), instance,
+            instance = objects.Instance._from_db_object(
+                context, objects.Instance(), instance,
                 expected_attrs=expected)
 
         # Create a local copy since we'll be modifying the dictionary
@@ -4857,7 +4859,7 @@ def _heal_instance_info_cache(self, context):
         if not instance_uuids:
             # The list of instances to heal is empty so rebuild it
             LOG.debug('Rebuilding the list of instances to heal')
-            db_instances = instance_obj.InstanceList.get_by_host(
+            db_instances = objects.InstanceList.get_by_host(
                 context, self.host, expected_attrs=[], use_slave=True)
             for inst in db_instances:
                 # We don't want to refersh the cache for instances
@@ -4885,7 +4887,7 @@ def _heal_instance_info_cache(self, context):
             # Find the next valid instance on the list
             while instance_uuids:
                 try:
-                    inst = instance_obj.Instance.get_by_uuid(
+                    inst = objects.Instance.get_by_uuid(
                             context, instance_uuids.pop(0),
                             expected_attrs=['system_metadata', 'info_cache'],
                             use_slave=True)
@@ -4926,7 +4928,7 @@ def _poll_rebooting_instances(self, context):
         if CONF.reboot_timeout > 0:
             filters = {'task_state': task_states.REBOOTING,
                        'host': self.host}
-            rebooting = instance_obj.InstanceList.get_by_filters(
+            rebooting = objects.InstanceList.get_by_filters(
                 context, filters, expected_attrs=[], use_slave=True)
 
             to_poll = []
@@ -4942,7 +4944,7 @@ def _poll_rescued_instances(self, context):
         if CONF.rescue_timeout > 0:
             filters = {'vm_state': vm_states.RESCUED,
                        'host': self.host}
-            rescued_instances = instance_obj.InstanceList.get_by_filters(
+            rescued_instances = objects.InstanceList.get_by_filters(
                 context, filters, expected_attrs=["system_metadata"],
                 use_slave=True)
 
@@ -4989,7 +4991,7 @@ def _set_migration_to_error(migration, reason, **kwargs):
                          'instance_uuid': instance_uuid})
             expected_attrs = ['metadata', 'system_metadata']
             try:
-                instance = instance_obj.Instance.get_by_uuid(context,
+                instance = objects.Instance.get_by_uuid(context,
                         instance_uuid, expected_attrs=expected_attrs,
                         use_slave=True)
             except exception.InstanceNotFound:
@@ -5028,7 +5030,7 @@ def _poll_shelved_instances(self, context):
 
         filters = {'vm_state': vm_states.SHELVED,
                    'host': self.host}
-        shelved_instances = instance_obj.InstanceList.get_by_filters(
+        shelved_instances = objects.InstanceList.get_by_filters(
             context, filters=filters, expected_attrs=['system_metadata'],
             use_slave=True)
 
@@ -5059,7 +5061,7 @@ def _instance_usage_audit(self, context):
             return
 
         begin, end = utils.last_completed_audit_period()
-        instances = instance_obj.InstanceList.get_active_by_window_joined(
+        instances = objects.InstanceList.get_active_by_window_joined(
             context, begin, end, host=self.host,
             expected_attrs=['system_metadata', 'info_cache', 'metadata'])
         num_instances = len(instances)
@@ -5124,7 +5126,7 @@ def _poll_bandwidth_usage(self, context):
         else:
             update_cells = False
 
-        instances = instance_obj.InstanceList.get_by_host(context,
+        instances = objects.InstanceList.get_by_host(context,
                                                           self.host,
                                                           use_slave=True)
         try:
@@ -5196,7 +5198,7 @@ def _poll_bandwidth_usage(self, context):
     def _get_host_volume_bdms(self, context):
         """Return all block device mappings on a compute host."""
         compute_host_bdms = []
-        instances = instance_obj.InstanceList.get_by_host(context, self.host)
+        instances = objects.InstanceList.get_by_host(context, self.host)
         for instance in instances:
             instance_bdms = [bdm for bdm in
                              (block_device_obj.BlockDeviceMappingList.
@@ -5252,7 +5254,7 @@ def _sync_power_states(self, context):
        loop, one database record at a time, checking if the hypervisor has the
        same power state as is in the database.
        """
-        db_instances = instance_obj.InstanceList.get_by_host(context,
+        db_instances = objects.InstanceList.get_by_host(context,
                                                              self.host,
                                                              use_slave=True)
 
@@ -5448,7 +5450,7 @@ def _reclaim_queued_deletes(self, context):
         filters = {'vm_state': vm_states.SOFT_DELETED,
                    'task_state': None,
                    'host': self.host}
-        instances = instance_obj.InstanceList.get_by_filters(
+        instances = objects.InstanceList.get_by_filters(
             context, filters,
             expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
             use_slave=True)
@@ -5708,7 +5710,7 @@ def _run_image_cache_manager_pass(self, context):
         filters = {'deleted': False,
                    'soft_deleted': True,
                    'host': nodes}
-        filtered_instances = instance_obj.InstanceList.get_by_filters(context,
+        filtered_instances = objects.InstanceList.get_by_filters(context,
                                  filters, expected_attrs=[], use_slave=True)
 
         self.driver.manage_image_cache(context, filtered_instances)
@@ -5726,7 +5728,7 @@ def _run_pending_deletes(self, context):
                    'cleaned': False}
         attrs = ['info_cache', 'security_groups', 'system_metadata']
         with utils.temporary_mutation(context, read_deleted='yes'):
-            instances = instance_obj.InstanceList.get_by_filters(
+            instances = objects.InstanceList.get_by_filters(
                 context, filters, expected_attrs=attrs)
         LOG.debug('There are %d instances to clean', len(instances))
4 changes: 2 additions & 2 deletions nova/compute/resource_tracker.py
@@ -29,9 +29,9 @@
 from nova import conductor
 from nova import context
 from nova import exception
+from nova import objects
 from nova.objects import base as obj_base
 from nova.objects import flavor as flavor_obj
-from nova.objects import instance as instance_obj
 from nova.objects import migration as migration_obj
 from nova.openstack.common.gettextutils import _
 from nova.openstack.common import importutils
@@ -311,7 +311,7 @@ def update_available_resource(self, context):
                 'pci_passthrough_devices')))
 
         # Grab all instances assigned to this node:
-        instances = instance_obj.InstanceList.get_by_host_and_node(
+        instances = objects.InstanceList.get_by_host_and_node(
             context, self.host, self.nodename)
 
         # Now calculate usage based on instance utilization:
4 changes: 2 additions & 2 deletions nova/compute/utils.py
@@ -28,7 +28,7 @@
 from nova import exception
 from nova.network import model as network_model
 from nova import notifications
-from nova.objects import instance as instance_obj
+from nova.objects import base as obj_base
 from nova.objects import instance_action as instance_action_obj
 from nova.objects import instance_fault as instance_fault_obj
 from nova.openstack.common.gettextutils import _
@@ -338,7 +338,7 @@ def notify_about_host_update(context, event_suffix, host_payload):
 
 
 def get_nw_info_for_instance(instance):
-    if isinstance(instance, instance_obj.Instance):
+    if isinstance(instance, obj_base.NovaObject):
         if instance.info_cache is None:
             return network_model.NetworkInfo.hydrate([])
         return instance.info_cache.network_info
7 changes: 7 additions & 0 deletions nova/objects/__init__.py
@@ -12,6 +12,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+# NOTE(comstud): You may scratch your head as you see code that imports
+# this module and then accesses attributes for objects such as Instance,
+# etc, yet you do not see these attributes in here. Never fear, there is
+# a little bit of magic. When objects are registered, an attribute is set
+# on this module automatically, pointing to the newest/latest version of
+# the object.
+
 
 def register_all():
     # NOTE(danms): You must make sure your object gets imported in this
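
For readers wondering what actually sets those attributes: below is a hedged sketch of the registration hook the NOTE describes. The helper names and the version comparison are assumptions for illustration; the real hook lives in Nova's object registry, not in this file.

    # Hypothetical sketch of the hook described in the NOTE above.
    import sys

    def _version_tuple(cls):
        # 'MAJOR.MINOR' -> (MAJOR, MINOR), so '1.10' sorts after '1.9'.
        return tuple(int(part) for part in cls.VERSION.split('.'))

    def expose_on_package(cls, package='nova.objects'):
        # Point <package>.<ObjName> at the newest version of the object.
        module = sys.modules[package]
        current = getattr(module, cls.obj_name(), None)
        if current is None or _version_tuple(cls) >= _version_tuple(current):
            setattr(module, cls.obj_name(), cls)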
2 changes: 1 addition & 1 deletion nova/tests/compute/test_compute_mgr.py
@@ -214,7 +214,7 @@ def _do_mock_calls(defer_iptables_apply):
         self.mox.VerifyAll()
         self.mox.UnsetStubs()
 
-    @mock.patch('nova.objects.instance.InstanceList')
+    @mock.patch('nova.objects.InstanceList')
     def test_cleanup_host(self, mock_instance_list):
         # just testing whether the cleanup_host method
         # when fired will invoke the underlying driver's
