# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
28
from nova import block_device
29
from nova.common.sqlalchemyutils import paginate_query
30
from nova.compute import vm_states
32
from nova.db.sqlalchemy import models
33
from nova.db.sqlalchemy.session import get_session
34
from nova import exception
35
from nova import flags
36
from nova.openstack.common import log as logging
37
from nova.openstack.common import timeutils
38
from nova import utils
39
from sqlalchemy import and_
40
from sqlalchemy.exc import IntegrityError
41
from sqlalchemy import or_
42
from sqlalchemy.orm import joinedload
43
from sqlalchemy.orm import joinedload_all
44
from sqlalchemy.sql.expression import asc
45
from sqlalchemy.sql.expression import desc
46
from sqlalchemy.sql.expression import literal_column
47
from sqlalchemy.sql import func
51
LOG = logging.getLogger(__name__)
54
def is_admin_context(context):
55
"""Indicates if the request context is an administrator."""
57
warnings.warn(_('Use of empty request context is deprecated'),
59
raise Exception('die')
60
return context.is_admin
63
def is_user_context(context):
64
"""Indicates if the request context is a normal user."""
69
if not context.user_id or not context.project_id:
74
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not is_user_context(context):
        # Admin contexts are not restricted to a single project.
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.NotAuthorized()
83
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not is_user_context(context):
        # Admin contexts are not restricted to a single user.
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.NotAuthorized()
92
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if not is_user_context(context):
        # Admin contexts may touch any quota class.
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.NotAuthorized()
101
def require_admin_context(f):
102
"""Decorator to require admin request context.
104
The first argument to the wrapped function must be the context.
108
def wrapper(*args, **kwargs):
109
if not is_admin_context(args[0]):
110
raise exception.AdminRequired()
111
return f(*args, **kwargs)
115
def require_context(f):
116
"""Decorator to require *any* user or admin context.
118
This does no authorization for user or project access matching, see
119
:py:func:`authorize_project_context` and
120
:py:func:`authorize_user_context`.
122
The first argument to the wrapped function must be the context.
126
def wrapper(*args, **kwargs):
127
if not is_admin_context(args[0]) and not is_user_context(args[0]):
128
raise exception.NotAuthorized()
129
return f(*args, **kwargs)
133
def require_instance_exists(f):
134
"""Decorator to require the specified instance to exist.
136
Requires the wrapped function to use context and instance_id as
137
their first two arguments.
140
def wrapper(context, instance_id, *args, **kwargs):
141
db.instance_get(context, instance_id)
142
return f(context, instance_id, *args, **kwargs)
147
def require_instance_exists_using_uuid(f):
148
"""Decorator to require the specified instance to exist.
150
Requires the wrapped function to use context and instance_uuid as
151
their first two arguments.
154
def wrapper(context, instance_uuid, *args, **kwargs):
155
db.instance_get_by_uuid(context, instance_uuid)
156
return f(context, instance_uuid, *args, **kwargs)
161
def require_volume_exists(f):
162
"""Decorator to require the specified volume to exist.
164
Requires the wrapped function to use context and volume_id as
165
their first two arguments.
168
def wrapper(context, volume_id, *args, **kwargs):
169
db.volume_get(context, volume_id)
170
return f(context, volume_id, *args, **kwargs)
171
wrapper.__name__ = f.__name__
175
def require_aggregate_exists(f):
176
"""Decorator to require the specified aggregate to exist.
178
Requires the wrapped function to use context and aggregate_id as
179
their first two arguments.
183
def wrapper(context, aggregate_id, *args, **kwargs):
184
db.aggregate_get(context, aggregate_id)
185
return f(context, aggregate_id, *args, **kwargs)
189
def model_query(context, model, *args, **kwargs):
190
"""Query helper that accounts for context's `read_deleted` field.
192
:param context: context to query under
193
:param session: if present, the session to use
194
:param read_deleted: if present, overrides context's read_deleted field.
195
:param project_only: if present and context is user-type, then restrict
196
query to match the context's project_id. If set to 'allow_none',
197
restriction includes project_id = None.
199
session = kwargs.get('session') or get_session()
200
read_deleted = kwargs.get('read_deleted') or context.read_deleted
201
project_only = kwargs.get('project_only', False)
203
query = session.query(model, *args)
205
if read_deleted == 'no':
206
query = query.filter_by(deleted=False)
207
elif read_deleted == 'yes':
208
pass # omit the filter to include deleted and active
209
elif read_deleted == 'only':
210
query = query.filter_by(deleted=True)
213
_("Unrecognized read_deleted value '%s'") % read_deleted)
215
if is_user_context(context) and project_only:
216
if project_only == 'allow_none':
217
query = query.filter(or_(model.project_id == context.project_id,
218
model.project_id == None))
220
query = query.filter_by(project_id=context.project_id)
225
def exact_filter(query, model, filters, legal_keys):
226
"""Applies exact match filtering to a query.
228
Returns the updated query. Modifies filters argument to remove
231
:param query: query to apply filters to
232
:param model: model object the query applies to, for IN-style
234
:param filters: dictionary of filters; values that are lists,
235
tuples, sets, or frozensets cause an 'IN' test to
236
be performed, while exact matching ('==' operator)
237
is used for other values
238
:param legal_keys: list of keys to apply exact filtering to
243
# Walk through all the keys
244
for key in legal_keys:
245
# Skip ones we're not filtering on
246
if key not in filters:
249
# OK, filtering on this key; what value do we search for?
250
value = filters.pop(key)
252
if key == 'metadata':
253
column_attr = getattr(model, key)
254
if isinstance(value, list):
256
for k, v in item.iteritems():
257
query = query.filter(column_attr.any(key=k))
258
query = query.filter(column_attr.any(value=v))
261
for k, v in value.iteritems():
262
query = query.filter(column_attr.any(key=k))
263
query = query.filter(column_attr.any(value=v))
264
elif isinstance(value, (list, tuple, set, frozenset)):
265
# Looking for values in a list; apply to query directly
266
column_attr = getattr(model, key)
267
query = query.filter(column_attr.in_(value))
269
# OK, simple exact match; save for later
270
filter_dict[key] = value
272
# Apply simple exact matches
274
query = query.filter_by(**filter_dict)
282
def constraint(**conditions):
    """Build a Constraint from column-name -> condition keyword args."""
    return Constraint(conditions)
286
def equal_any(*values):
    """Condition that matches rows whose column equals any of *values*."""
    return EqualityCondition(values)
290
def not_equal(*values):
    """Condition that matches rows whose column differs from all *values*."""
    return InequalityCondition(values)
294
class Constraint(object):
296
def __init__(self, conditions):
297
self.conditions = conditions
299
def apply(self, model, query):
300
for key, condition in self.conditions.iteritems():
301
for clause in condition.clauses(getattr(model, key)):
302
query = query.filter(clause)
306
class EqualityCondition(object):
308
def __init__(self, values):
311
def clauses(self, field):
312
return or_([field == value for value in self.values])
315
class InequalityCondition(object):
317
def __init__(self, values):
320
def clauses(self, field):
321
return [field != value for value in self.values]
327
@require_admin_context
def service_destroy(context, service_id):
    """Soft-delete a service record and its associated compute nodes."""
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.delete(session=session)

        # A compute service also owns compute_node rows; remove those too
        # so they do not linger as orphans.
        if service_ref.topic == 'compute' and service_ref.compute_node:
            for node in service_ref.compute_node:
                node.delete(session=session)
339
@require_admin_context
340
def service_get(context, service_id, session=None):
341
result = model_query(context, models.Service, session=session).\
342
options(joinedload('compute_node')).\
343
filter_by(id=service_id).\
346
raise exception.ServiceNotFound(service_id=service_id)
351
@require_admin_context
352
def service_get_all(context, disabled=None):
353
query = model_query(context, models.Service)
355
if disabled is not None:
356
query = query.filter_by(disabled=disabled)
361
@require_admin_context
362
def service_get_all_by_topic(context, topic):
363
return model_query(context, models.Service, read_deleted="no").\
364
filter_by(disabled=False).\
365
filter_by(topic=topic).\
369
@require_admin_context
370
def service_get_by_host_and_topic(context, host, topic):
371
return model_query(context, models.Service, read_deleted="no").\
372
filter_by(disabled=False).\
373
filter_by(host=host).\
374
filter_by(topic=topic).\
378
@require_admin_context
379
def service_get_all_by_host(context, host):
380
return model_query(context, models.Service, read_deleted="no").\
381
filter_by(host=host).\
385
@require_admin_context
386
def service_get_all_compute_by_host(context, host):
387
result = model_query(context, models.Service, read_deleted="no").\
388
options(joinedload('compute_node')).\
389
filter_by(host=host).\
390
filter_by(topic="compute").\
394
raise exception.ComputeHostNotFound(host=host)
399
@require_admin_context
400
def _service_get_all_topic_subquery(context, session, topic, subq, label):
401
sort_value = getattr(subq.c, label)
402
return model_query(context, models.Service,
403
func.coalesce(sort_value, 0),
404
session=session, read_deleted="no").\
405
filter_by(topic=topic).\
406
filter_by(disabled=False).\
407
outerjoin((subq, models.Service.host == subq.c.host)).\
408
order_by(sort_value).\
412
@require_admin_context
413
def service_get_all_compute_sorted(context):
414
session = get_session()
415
with session.begin():
416
# NOTE(vish): The intended query is below
417
# SELECT services.*, COALESCE(inst_cores.instance_cores,
419
# FROM services LEFT OUTER JOIN
420
# (SELECT host, SUM(instances.vcpus) AS instance_cores
421
# FROM instances GROUP BY host) AS inst_cores
422
# ON services.host = inst_cores.host
424
label = 'instance_cores'
425
subq = model_query(context, models.Instance.host,
426
func.sum(models.Instance.vcpus).label(label),
427
session=session, read_deleted="no").\
428
group_by(models.Instance.host).\
430
return _service_get_all_topic_subquery(context,
437
@require_admin_context
438
def service_get_all_volume_sorted(context):
439
session = get_session()
440
with session.begin():
442
label = 'volume_gigabytes'
443
subq = model_query(context, models.Volume.host,
444
func.sum(models.Volume.size).label(label),
445
session=session, read_deleted="no").\
446
group_by(models.Volume.host).\
448
return _service_get_all_topic_subquery(context,
455
@require_admin_context
456
def service_get_by_args(context, host, binary):
457
result = model_query(context, models.Service).\
458
filter_by(host=host).\
459
filter_by(binary=binary).\
463
raise exception.HostBinaryNotFound(host=host, binary=binary)
468
@require_admin_context
469
def service_create(context, values):
470
service_ref = models.Service()
471
service_ref.update(values)
472
if not FLAGS.enable_new_services:
473
service_ref.disabled = True
478
@require_admin_context
def service_update(context, service_id, values):
    """Apply *values* to an existing service record and persist it."""
    session = get_session()
    with session.begin():
        target = service_get(context, service_id, session=session)
        target.update(values)
        target.save(session=session)
489
def compute_node_get(context, compute_id, session=None):
490
result = model_query(context, models.ComputeNode, session=session).\
491
filter_by(id=compute_id).\
492
options(joinedload('service')).\
493
options(joinedload('stats')).\
497
raise exception.ComputeHostNotFound(host=compute_id)
502
@require_admin_context
503
def compute_node_get_all(context, session=None):
504
return model_query(context, models.ComputeNode, session=session).\
505
options(joinedload('service')).\
506
options(joinedload('stats')).\
510
@require_admin_context
511
def compute_node_search_by_hypervisor(context, hypervisor_match):
512
field = models.ComputeNode.hypervisor_hostname
513
return model_query(context, models.ComputeNode).\
514
options(joinedload('service')).\
515
filter(field.like('%%%s%%' % hypervisor_match)).\
519
def _prep_stats_dict(values):
520
"""Make list of ComputeNodeStats"""
522
d = values.get('stats', {})
523
for k, v in d.iteritems():
524
stat = models.ComputeNodeStat()
528
values['stats'] = stats
531
@require_admin_context
def compute_node_create(context, values, session=None):
    """Creates a new ComputeNode and populates the capacity fields
    with the most recent data.

    :param values: dict of column values; a 'stats' sub-dict is converted
        to ComputeNodeStat models by _prep_stats_dict before insert
    :param session: optional existing session to reuse; a new one is
        opened only when none is supplied
    """
    _prep_stats_dict(values)

    # Fix: only open a new session when the caller did not supply one;
    # previously the `session` argument was unconditionally overwritten.
    if not session:
        session = get_session()

    with session.begin(subtransactions=True):
        compute_node_ref = models.ComputeNode()
        session.add(compute_node_ref)
        compute_node_ref.update(values)
    return compute_node_ref
547
def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
549
existing = model_query(context, models.ComputeNodeStat, session=session,
550
read_deleted="no").filter_by(compute_node_id=compute_id).all()
552
for stat in existing:
557
for k, v in new_stats.iteritems():
558
old_stat = statmap.pop(k, None)
560
# update existing value:
561
old_stat.update({'value': v})
562
stats.append(old_stat)
565
stat = models.ComputeNodeStat()
566
stat['compute_node_id'] = compute_id
572
# prune un-touched old stats:
573
for stat in statmap.values():
575
stat.update({'deleted': True})
577
# add new and updated stats
582
@require_admin_context
583
def compute_node_update(context, compute_id, values, prune_stats=False):
584
"""Updates the ComputeNode record with the most recent data"""
585
stats = values.pop('stats', {})
587
session = get_session()
588
with session.begin(subtransactions=True):
589
_update_stats(context, stats, compute_id, session, prune_stats)
590
compute_ref = compute_node_get(context, compute_id, session=session)
591
compute_ref.update(values)
595
def compute_node_get_by_host(context, host):
596
"""Get all capacity entries for the given host."""
597
session = get_session()
598
with session.begin():
599
node = session.query(models.ComputeNode).\
601
filter(models.Service.host == host).\
602
filter_by(deleted=False)
606
def compute_node_statistics(context):
    """Compute statistics over all compute nodes."""
    # Field names and aggregate expressions kept in lockstep by position.
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
              'current_workload', 'running_vms', 'disk_available_least')
    aggregates = (func.count(models.ComputeNode.id),
                  func.sum(models.ComputeNode.vcpus),
                  func.sum(models.ComputeNode.memory_mb),
                  func.sum(models.ComputeNode.local_gb),
                  func.sum(models.ComputeNode.vcpus_used),
                  func.sum(models.ComputeNode.memory_mb_used),
                  func.sum(models.ComputeNode.local_gb_used),
                  func.sum(models.ComputeNode.free_ram_mb),
                  func.sum(models.ComputeNode.free_disk_gb),
                  func.sum(models.ComputeNode.current_workload),
                  func.sum(models.ComputeNode.running_vms),
                  func.sum(models.ComputeNode.disk_available_least))
    row = model_query(context, *aggregates, read_deleted="no").first()

    # SQL SUM over zero rows yields NULL; report those as zero and make
    # no other assumptions about the result row.
    return dict((name, int(row[idx] or 0))
                for idx, name in enumerate(fields))
634
@require_admin_context
635
def certificate_get(context, certificate_id, session=None):
636
result = model_query(context, models.Certificate, session=session).\
637
filter_by(id=certificate_id).\
641
raise exception.CertificateNotFound(certificate_id=certificate_id)
646
@require_admin_context
def certificate_create(context, values):
    """Create a certificate row from *values* and return the saved model."""
    cert_ref = models.Certificate()
    for key, value in values.iteritems():
        cert_ref[key] = value
    cert_ref.save()
    return cert_ref
655
@require_admin_context
656
def certificate_get_all_by_project(context, project_id):
657
return model_query(context, models.Certificate, read_deleted="no").\
658
filter_by(project_id=project_id).\
662
@require_admin_context
663
def certificate_get_all_by_user(context, user_id):
664
return model_query(context, models.Certificate, read_deleted="no").\
665
filter_by(user_id=user_id).\
669
@require_admin_context
670
def certificate_get_all_by_user_and_project(context, user_id, project_id):
671
return model_query(context, models.Certificate, read_deleted="no").\
672
filter_by(user_id=user_id).\
673
filter_by(project_id=project_id).\
681
def floating_ip_get(context, id):
682
result = model_query(context, models.FloatingIp, project_only=True).\
687
raise exception.FloatingIpNotFound(id=id)
693
def floating_ip_get_pools(context):
695
for result in model_query(context, models.FloatingIp.pool).distinct():
696
pools.append({'name': result[0]})
701
def floating_ip_allocate_address(context, project_id, pool):
702
authorize_project_context(context, project_id)
703
session = get_session()
704
with session.begin():
705
floating_ip_ref = model_query(context, models.FloatingIp,
706
session=session, read_deleted="no").\
707
filter_by(fixed_ip_id=None).\
708
filter_by(project_id=None).\
709
filter_by(pool=pool).\
710
with_lockmode('update').\
712
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
713
# then this has concurrency issues
714
if not floating_ip_ref:
715
raise exception.NoMoreFloatingIps()
716
floating_ip_ref['project_id'] = project_id
717
session.add(floating_ip_ref)
718
return floating_ip_ref['address']
722
def floating_ip_bulk_create(context, ips):
724
for floating in _floating_ip_get_all(context).all():
725
existing_ips[floating['address']] = floating
727
session = get_session()
728
with session.begin():
731
if (addr in existing_ips and
732
ip.get('id') != existing_ips[addr]['id']):
733
raise exception.FloatingIpExists(**dict(existing_ips[addr]))
735
model = models.FloatingIp()
740
def _ip_range_splitter(ips, block_size=256):
741
"""Yields blocks of IPs no more than block_size elements long."""
745
out.append(ip['address'])
748
if count > block_size - 1:
758
def floating_ip_bulk_destroy(context, ips):
    """Soft-delete every floating IP whose address appears in *ips*."""
    session = get_session()
    with session.begin():
        # Work in bounded chunks so the IN(...) clause stays a sane size.
        for block in _ip_range_splitter(ips):
            (model_query(context, models.FloatingIp).
                filter(models.FloatingIp.address.in_(block)).
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow()},
                       synchronize_session='fetch'))
770
def floating_ip_create(context, values, session=None):
772
session = get_session()
774
floating_ip_ref = models.FloatingIp()
775
floating_ip_ref.update(values)
777
# check uniqueness for not deleted addresses
778
if not floating_ip_ref.deleted:
780
floating_ip = floating_ip_get_by_address(context,
781
floating_ip_ref.address,
783
except exception.FloatingIpNotFoundForAddress:
786
if floating_ip.id != floating_ip_ref.id:
787
raise exception.FloatingIpExists(**dict(floating_ip_ref))
789
floating_ip_ref.save(session=session)
790
return floating_ip_ref['address']
794
def floating_ip_count_by_project(context, project_id, session=None):
795
authorize_project_context(context, project_id)
796
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
797
return model_query(context, models.FloatingIp, read_deleted="no",
799
filter_by(project_id=project_id).\
800
filter_by(auto_assigned=False).\
805
def floating_ip_fixed_ip_associate(context, floating_address,
806
fixed_address, host):
807
session = get_session()
808
with session.begin():
809
floating_ip_ref = floating_ip_get_by_address(context,
812
fixed_ip_ref = fixed_ip_get_by_address(context,
815
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
817
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
818
floating_ip_ref.host = host
819
floating_ip_ref.save(session=session)
824
def floating_ip_deallocate(context, address):
825
session = get_session()
826
with session.begin():
827
floating_ip_ref = floating_ip_get_by_address(context,
830
floating_ip_ref['project_id'] = None
831
floating_ip_ref['host'] = None
832
floating_ip_ref['auto_assigned'] = False
833
floating_ip_ref.save(session=session)
837
def floating_ip_destroy(context, address):
838
session = get_session()
839
with session.begin():
840
floating_ip_ref = floating_ip_get_by_address(context,
843
floating_ip_ref.delete(session=session)
847
def floating_ip_disassociate(context, address):
848
session = get_session()
849
with session.begin():
850
floating_ip_ref = model_query(context,
853
filter_by(address=address).\
855
if not floating_ip_ref:
856
raise exception.FloatingIpNotFoundForAddress(address=address)
858
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
859
filter_by(id=floating_ip_ref['fixed_ip_id']).\
862
fixed_ip_address = fixed_ip_ref['address']
864
fixed_ip_address = None
865
floating_ip_ref.fixed_ip_id = None
866
floating_ip_ref.host = None
867
floating_ip_ref.save(session=session)
868
return fixed_ip_address
872
def floating_ip_set_auto_assigned(context, address):
873
session = get_session()
874
with session.begin():
875
floating_ip_ref = floating_ip_get_by_address(context,
878
floating_ip_ref.auto_assigned = True
879
floating_ip_ref.save(session=session)
882
def _floating_ip_get_all(context, session=None):
883
return model_query(context, models.FloatingIp, read_deleted="no",
887
@require_admin_context
def floating_ip_get_all(context):
    """Return every non-deleted floating IP; raise when none are defined."""
    result = _floating_ip_get_all(context).all()
    if not result:
        raise exception.NoFloatingIpsDefined()
    return result
895
@require_admin_context
896
def floating_ip_get_all_by_host(context, host):
897
floating_ip_refs = _floating_ip_get_all(context).\
898
filter_by(host=host).\
900
if not floating_ip_refs:
901
raise exception.FloatingIpNotFoundForHost(host=host)
902
return floating_ip_refs
906
def floating_ip_get_all_by_project(context, project_id):
907
authorize_project_context(context, project_id)
908
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
909
return _floating_ip_get_all(context).\
910
filter_by(project_id=project_id).\
911
filter_by(auto_assigned=False).\
916
def floating_ip_get_by_address(context, address, session=None):
917
result = model_query(context, models.FloatingIp, session=session).\
918
filter_by(address=address).\
922
raise exception.FloatingIpNotFoundForAddress(address=address)
924
# If the floating IP has a project ID set, check to make sure
925
# the non-admin user has access.
926
if result.project_id and is_user_context(context):
927
authorize_project_context(context, result.project_id)
933
def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
935
session = get_session()
937
fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
938
fixed_ip_id = fixed_ip['id']
940
return model_query(context, models.FloatingIp, session=session).\
941
filter_by(fixed_ip_id=fixed_ip_id).\
944
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
948
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
950
session = get_session()
952
return model_query(context, models.FloatingIp, session=session).\
953
filter_by(fixed_ip_id=fixed_ip_id).\
958
def floating_ip_update(context, address, values):
    """Look up the floating IP at *address* and apply *values* to it."""
    session = get_session()
    with session.begin():
        fip_ref = floating_ip_get_by_address(context, address, session)
        for key, value in values.iteritems():
            fip_ref[key] = value
        fip_ref.save(session=session)
968
def _dnsdomain_get(context, session, fqdomain):
969
return model_query(context, models.DNSDomain,
970
session=session, read_deleted="no").\
971
filter_by(domain=fqdomain).\
972
with_lockmode('update').\
977
def dnsdomain_get(context, fqdomain):
    """Return the DNS domain record matching *fqdomain*."""
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get(context, session, fqdomain)
        return domain_ref
983
@require_admin_context
984
def _dnsdomain_get_or_create(context, session, fqdomain):
985
domain_ref = _dnsdomain_get(context, session, fqdomain)
987
dns_ref = models.DNSDomain()
988
dns_ref.update({'domain': fqdomain,
989
'availability_zone': None,
996
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Record *fqdomain* as a private domain tied to an availability zone."""
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
        domain_ref.update({'scope': 'private',
                           'availability_zone': zone})
        domain_ref.save(session=session)
1006
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
    """Record *fqdomain* as a public domain owned by *project*."""
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
        domain_ref.update({'scope': 'public',
                           'project_id': project})
        domain_ref.save(session=session)
1016
@require_admin_context
1017
def dnsdomain_unregister(context, fqdomain):
1018
session = get_session()
1019
with session.begin():
1020
session.query(models.DNSDomain).\
1021
filter_by(domain=fqdomain).\
1026
def dnsdomain_list(context):
1027
session = get_session()
1028
records = model_query(context, models.DNSDomain,
1029
session=session, read_deleted="no").\
1032
for record in records:
1033
domains.append(record.domain)
1041
@require_admin_context
1042
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
1044
"""Keyword arguments:
1045
reserved -- should be a boolean value(True or False), exact value will be
1046
used to filter on the fixed ip address
1048
if not utils.is_uuid_like(instance_uuid):
1049
raise exception.InvalidUUID(uuid=instance_uuid)
1051
session = get_session()
1052
with session.begin():
1053
network_or_none = or_(models.FixedIp.network_id == network_id,
1054
models.FixedIp.network_id == None)
1055
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
1056
read_deleted="no").\
1057
filter(network_or_none).\
1058
filter_by(reserved=reserved).\
1059
filter_by(address=address).\
1060
with_lockmode('update').\
1062
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
1063
# then this has concurrency issues
1064
if fixed_ip_ref is None:
1065
raise exception.FixedIpNotFoundForNetwork(address=address,
1066
network_id=network_id)
1067
if fixed_ip_ref.instance_uuid:
1068
raise exception.FixedIpAlreadyInUse(address=address)
1070
if not fixed_ip_ref.network_id:
1071
fixed_ip_ref.network_id = network_id
1072
fixed_ip_ref.instance_uuid = instance_uuid
1073
session.add(fixed_ip_ref)
1074
return fixed_ip_ref['address']
1077
@require_admin_context
1078
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
1080
if instance_uuid and not utils.is_uuid_like(instance_uuid):
1081
raise exception.InvalidUUID(uuid=instance_uuid)
1083
session = get_session()
1084
with session.begin():
1085
network_or_none = or_(models.FixedIp.network_id == network_id,
1086
models.FixedIp.network_id == None)
1087
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
1088
read_deleted="no").\
1089
filter(network_or_none).\
1090
filter_by(reserved=False).\
1091
filter_by(instance_uuid=None).\
1092
filter_by(host=None).\
1093
with_lockmode('update').\
1095
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
1096
# then this has concurrency issues
1097
if not fixed_ip_ref:
1098
raise exception.NoMoreFixedIps()
1100
if fixed_ip_ref['network_id'] is None:
1101
fixed_ip_ref['network'] = network_id
1104
fixed_ip_ref['instance_uuid'] = instance_uuid
1107
fixed_ip_ref['host'] = host
1108
session.add(fixed_ip_ref)
1109
return fixed_ip_ref['address']
1113
def fixed_ip_create(context, values):
1114
fixed_ip_ref = models.FixedIp()
1115
fixed_ip_ref.update(values)
1117
return fixed_ip_ref['address']
1121
def fixed_ip_bulk_create(context, ips):
1122
session = get_session()
1123
with session.begin():
1125
model = models.FixedIp()
1131
def fixed_ip_disassociate(context, address):
1132
session = get_session()
1133
with session.begin():
1134
fixed_ip_ref = fixed_ip_get_by_address(context,
1137
fixed_ip_ref['instance_uuid'] = None
1138
fixed_ip_ref.save(session=session)
1141
@require_admin_context
1142
def fixed_ip_disassociate_all_by_timeout(context, host, time):
1143
session = get_session()
1144
# NOTE(vish): only update fixed ips that "belong" to this
1145
# host; i.e. the network host or the instance
1146
# host matches. Two queries necessary because
1147
# join with update doesn't work.
1148
host_filter = or_(and_(models.Instance.host == host,
1149
models.Network.multi_host == True),
1150
models.Network.host == host)
1151
result = session.query(models.FixedIp.id).\
1152
filter(models.FixedIp.deleted == False).\
1153
filter(models.FixedIp.allocated == False).\
1154
filter(models.FixedIp.updated_at < time).\
1155
join((models.Network,
1156
models.Network.id == models.FixedIp.network_id)).\
1157
join((models.Instance,
1158
models.Instance.uuid == \
1159
models.FixedIp.instance_uuid)).\
1160
filter(host_filter).\
1162
fixed_ip_ids = [fip[0] for fip in result]
1163
if not fixed_ip_ids:
1165
result = model_query(context, models.FixedIp, session=session).\
1166
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
1167
update({'instance_uuid': None,
1169
'updated_at': timeutils.utcnow()},
1170
synchronize_session='fetch')
1175
def fixed_ip_get(context, id):
1176
result = model_query(context, models.FixedIp).\
1180
raise exception.FixedIpNotFound(id=id)
1182
# FIXME(sirp): shouldn't we just use project_only here to restrict the
1184
if is_user_context(context) and result['instance_uuid'] is not None:
1185
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
1186
result['instance_uuid'])
1187
authorize_project_context(context, instance.project_id)
1192
@require_admin_context
1193
def fixed_ip_get_all(context, session=None):
1194
result = model_query(context, models.FixedIp, session=session,
1195
read_deleted="yes").\
1198
raise exception.NoFixedIpsDefined()
1204
def fixed_ip_get_by_address(context, address, session=None):
1205
result = model_query(context, models.FixedIp, session=session).\
1206
filter_by(address=address).\
1209
raise exception.FixedIpNotFoundForAddress(address=address)
1211
# NOTE(sirp): shouldn't we just use project_only here to restrict the
1213
if is_user_context(context) and result['instance_uuid'] is not None:
1214
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
1215
result['instance_uuid'],
1217
authorize_project_context(context, instance.project_id)
1223
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
    """Return all non-deleted fixed IPs allocated to an instance.

    :raises: InvalidUUID for a malformed uuid,
             FixedIpNotFoundForInstance when the instance has none.
    """
    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    result = model_query(context, models.FixedIp, read_deleted="no").\
                     filter_by(instance_uuid=instance_uuid).\
                     all()
    if not result:
        raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
    return result


@require_admin_context
def fixed_ip_get_by_network_host(context, network_id, host):
    """Return the first fixed IP on a network owned by a given host."""
    result = model_query(context, models.FixedIp, read_deleted="no").\
                     filter_by(network_id=network_id).\
                     filter_by(host=host).\
                     first()
    if not result:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return result


@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
    """Return all non-deleted fixed IPs bound to a virtual interface."""
    result = model_query(context, models.FixedIp, read_deleted="no").\
                     filter_by(virtual_interface_id=vif_id).\
                     all()
    return result


@require_admin_context
def fixed_ip_get_network(context, address):
    """Return the network model joined to the fixed IP at ``address``."""
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.network
1266
@require_context
def fixed_ip_update(context, address, values):
    """Update the fixed IP at ``address`` with the given column values."""
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref.update(values)
        fixed_ip_ref.save(session=session)
1280
@require_context
def virtual_interface_create(context, values):
    """Create a new virtual interface record in the database.

    :param values: = dict containing column values
    :raises: VirtualInterfaceCreateException on a DB integrity failure.
    """
    try:
        vif_ref = models.VirtualInterface()
        vif_ref.update(values)
        vif_ref.save()
    except exception.DBError:
        raise exception.VirtualInterfaceCreateException()

    return vif_ref


def _virtual_interface_query(context, session=None):
    # Shared base query; deliberately includes deleted rows so callers
    # can resolve historical interfaces.
    return model_query(context, models.VirtualInterface, session=session,
                       read_deleted="yes")


@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Gets a virtual interface from the table.

    :param vif_id: = id of the virtual interface
    """
    vif_ref = _virtual_interface_query(context, session=session).\
              filter_by(id=vif_id).\
              first()
    return vif_ref


@require_context
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table.

    :param address: = the address of the interface you're looking to get
    """
    vif_ref = _virtual_interface_query(context).\
              filter_by(address=address).\
              first()
    return vif_ref


@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table.

    :param vif_uuid: the uuid of the interface you're looking to get
    """
    vif_ref = _virtual_interface_query(context).\
              filter_by(uuid=vif_uuid).\
              first()
    return vif_ref
1338
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid):
    """Gets all virtual interfaces for instance.

    :param instance_uuid: = uuid of the instance to retrieve vifs for
    """
    vif_refs = _virtual_interface_query(context).\
               filter_by(instance_uuid=instance_uuid).\
               all()
    return vif_refs


@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
                                                  network_id):
    """Gets virtual interface for instance that's associated with network."""
    vif_ref = _virtual_interface_query(context).\
              filter_by(instance_uuid=instance_uuid).\
              filter_by(network_id=network_id).\
              first()
    return vif_ref


@require_context
def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database.

    :param vif_id: = id of vif to delete
    """
    session = get_session()
    vif_ref = virtual_interface_get(context, vif_id, session)
    with session.begin():
        session.delete(vif_ref)


@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
    """Delete virtual interface records that are associated
    with the instance given by instance_id.

    :param instance_uuid: = uuid of instance
    """
    vif_refs = virtual_interface_get_by_instance(context, instance_uuid)
    for vif_ref in vif_refs:
        virtual_interface_delete(context, vif_ref['id'])


def virtual_interface_get_all(context):
    """Return every virtual interface row visible to the query helper."""
    vif_refs = _virtual_interface_query(context).all()
    return vif_refs
1395
def _metadata_refs(metadata_dict, meta_class):
    """Convert a plain {key: value} dict into a list of metadata models.

    :param metadata_dict: mapping of metadata keys to values (may be None)
    :param meta_class: model class to instantiate for each pair
    :returns: list of ``meta_class`` instances (empty for falsy input)
    """
    metadata_refs = []
    if metadata_dict:
        for k, v in metadata_dict.iteritems():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
            metadata_refs.append(metadata_ref)
    return metadata_refs
1407
@require_context
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """
    # Work on a copy so the caller's dict is never mutated.
    values = values.copy()
    values['metadata'] = _metadata_refs(
            values.get('metadata'), models.InstanceMetadata)

    values['system_metadata'] = _metadata_refs(
            values.get('system_metadata'), models.InstanceSystemMetadata)

    instance_ref = models.Instance()
    if not values.get('uuid'):
        values['uuid'] = str(utils.gen_uuid())
    instance_ref['info_cache'] = models.InstanceInfoCache()
    info_cache = values.pop('info_cache', None)
    if info_cache is not None:
        instance_ref['info_cache'].update(info_cache)
    security_groups = values.pop('security_groups', [])
    instance_ref.update(values)

    def _get_sec_group_models(session, security_groups):
        # Resolve requested security-group names to model rows, always
        # ensuring the project's 'default' group exists.
        models = []
        _existed, default_group = security_group_ensure_default(context,
                                                                session=session)
        if 'default' in security_groups:
            models.append(default_group)
            # Generate a new list, so we don't modify the original
            security_groups = [x for x in security_groups if x != 'default']
        if security_groups:
            models.extend(_security_group_get_by_names(context,
                    session, context.project_id, security_groups))
        return models

    session = get_session()
    with session.begin():
        instance_ref.security_groups = _get_sec_group_models(session,
                security_groups)
        instance_ref.save(session=session)
        # NOTE(comstud): This forces instance_type to be loaded so it
        # exists in the ref when we return.  Fixes lazy loading issues.
        instance_ref.instance_type

    # create the instance uuid to ec2_id mapping entry for instance
    ec2_instance_create(context, instance_ref['uuid'])

    return instance_ref
1458
@require_admin_context
def instance_data_get_for_project(context, project_id, session=None):
    """Return (instance count, total vcpus, total memory_mb) for a project."""
    result = model_query(context,
                         func.count(models.Instance.id),
                         func.sum(models.Instance.vcpus),
                         func.sum(models.Instance.memory_mb),
                         read_deleted="no",
                         session=session).\
                     filter_by(project_id=project_id).\
                     first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)
1473
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
    """Soft-delete an instance and its security-group associations.

    :param constraint: optional query constraint that must still match the
                       row at delete time; ConstraintNotMet is raised if the
                       guarded update touches no rows.
    """
    session = get_session()
    with session.begin():
        if utils.is_uuid_like(instance_uuid):
            instance_ref = instance_get_by_uuid(context, instance_uuid,
                                                session=session)
        else:
            raise exception.InvalidUUID(instance_uuid)

        query = session.query(models.Instance).\
                        filter_by(uuid=instance_ref['uuid'])
        if constraint is not None:
            query = constraint.apply(models.Instance, query)
        count = query.update({'deleted': True,
                              'deleted_at': timeutils.utcnow(),
                              'updated_at': literal_column('updated_at')})
        if count == 0:
            raise exception.ConstraintNotMet()
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_uuid=instance_ref['uuid']).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})

        instance_info_cache_delete(context, instance_ref['uuid'],
                                   session=session)
    return instance_ref
1503
@require_context
def instance_get_by_uuid(context, uuid, session=None):
    """Fetch one instance by uuid; raises InstanceNotFound if absent."""
    result = _build_instance_get(context, session=session).\
                filter_by(uuid=uuid).\
                first()

    if not result:
        raise exception.InstanceNotFound(instance_id=uuid)

    return result


@require_context
def instance_get(context, instance_id, session=None):
    """Fetch one instance by integer id; raises InstanceNotFound if absent."""
    result = _build_instance_get(context, session=session).\
                filter_by(id=instance_id).\
                first()

    if not result:
        raise exception.InstanceNotFound(instance_id=instance_id)

    return result


@require_context
def _build_instance_get(context, session=None):
    # Base instance query with the standard eager-loaded relationships.
    return model_query(context, models.Instance, session=session,
                       project_only=True).\
            options(joinedload_all('security_groups.rules')).\
            options(joinedload('info_cache')).\
            options(joinedload('metadata')).\
            options(joinedload('instance_type'))
1536
@require_admin_context
def instance_get_all(context, columns_to_join=None):
    """Return all instances, eager-loading ``columns_to_join`` relations.

    :param columns_to_join: relationship names to joinedload; defaults to
                            the standard four used throughout this module.
    """
    if columns_to_join is None:
        columns_to_join = ['info_cache', 'security_groups',
                           'metadata', 'instance_type']
    query = model_query(context, models.Instance)
    for column in columns_to_join:
        query = query.options(joinedload(column))
    return query.all()
1548
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise"""

    sort_fn = {'desc': desc, 'asc': asc}

    session = get_session()
    query_prefix = session.query(models.Instance).\
            options(joinedload('info_cache')).\
            options(joinedload('security_groups')).\
            options(joinedload('metadata')).\
            options(joinedload('instance_type')).\
            order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
                            filter(models.Instance.updated_at > changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        if filters.pop('deleted'):
            deleted = or_(models.Instance.deleted == True,
                          models.Instance.vm_state == vm_states.SOFT_DELETED)
            query_prefix = query_prefix.filter(deleted)
        else:
            query_prefix = query_prefix.\
                    filter_by(deleted=False).\
                    filter(models.Instance.vm_state != vm_states.SOFT_DELETED)

    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata']

    # Filter the query by exact matches, then regexp-match the rest.
    query_prefix = exact_filter(query_prefix, models.Instance,
                                filters, exact_match_filter_names)

    query_prefix = regex_filter(query_prefix, models.Instance, filters)

    # paginate query
    if marker is not None:
        try:
            marker = instance_get_by_uuid(context, marker, session=session)
        except exception.InstanceNotFound as e:
            # Present a pagination-specific error to the caller.
            raise exception.MarkerNotFound(marker)
    query_prefix = paginate_query(query_prefix, models.Instance, limit,
                                  [sort_key, 'created_at', 'id'],
                                  marker=marker,
                                  sort_dir=sort_dir)

    instances = query_prefix.all()
    return instances
1619
def regex_filter(query, model, filters):
    """Applies regular expression filtering to a query.

    Returns the updated query.

    :param query: query to apply filters to
    :param model: model object the query applies to
    :param filters: dictionary of filters with regex values
    """

    # Map DB dialect (from the connection URL scheme) to its regexp operator;
    # anything unrecognized falls back to LIKE.
    regexp_op_map = {
        'postgresql': '~',
        'mysql': 'REGEXP',
        'oracle': 'REGEXP_LIKE',
        'sqlite': 'REGEXP'
    }
    db_string = FLAGS.sql_connection.split(':')[0].split('+')[0]
    db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
    for filter_name in filters.iterkeys():
        try:
            column_attr = getattr(model, filter_name)
        except AttributeError:
            continue
        # Skip python properties; only mapped columns can be filtered in SQL.
        if 'property' == type(column_attr).__name__:
            continue
        query = query.filter(column_attr.op(db_regexp_op)(
                                    str(filters[filter_name])))
    return query
1650
@require_context
def instance_get_active_by_window(context, begin, end=None,
                                  project_id=None, host=None):
    """Return instances that were active during window."""
    session = get_session()
    query = session.query(models.Instance)

    # Active = not terminated before the window started.
    query = query.filter(or_(models.Instance.terminated_at == None,
                             models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    return query.all()


@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None):
    """Return instances and joins that were active during window."""
    session = get_session()
    query = session.query(models.Instance)

    query = query.options(joinedload('info_cache')).\
                  options(joinedload('security_groups')).\
                  options(joinedload('metadata')).\
                  options(joinedload('instance_type')).\
                  filter(or_(models.Instance.terminated_at == None,
                             models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    return query.all()
1691
@require_admin_context
def _instance_get_all_query(context, project_only=False):
    # Common base query with the standard eager-loaded relationships.
    return model_query(context, models.Instance, project_only=project_only).\
                   options(joinedload('info_cache')).\
                   options(joinedload('security_groups')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type'))


@require_admin_context
def instance_get_all_by_host(context, host):
    """Return all instances scheduled onto ``host``."""
    return _instance_get_all_query(context).filter_by(host=host).all()


@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
    """Return instances on ``host`` whose instance type differs from
    ``type_id``."""
    return _instance_get_all_query(context).filter_by(host=host).\
                   filter(models.Instance.instance_type_id != type_id).all()


@require_context
def instance_get_all_by_project(context, project_id):
    """Return all instances belonging to ``project_id``."""
    authorize_project_context(context, project_id)
    return _instance_get_all_query(context).\
                    filter_by(project_id=project_id).\
                    all()


@require_context
def instance_get_all_by_reservation(context, reservation_id):
    """Return all instances created under one reservation id."""
    return _instance_get_all_query(context, project_only=True).\
                    filter_by(reservation_id=reservation_id).\
                    all()
1726
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts return the model. This
#                function and its call in compute/manager.py on 1829 can
#                go away
@require_context
def instance_get_floating_address(context, instance_id):
    """Return the first floating IP address attached to an instance's
    first fixed IP, or None when none exists."""
    instance = instance_get(context, instance_id)
    fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])

    if not fixed_ips:
        return None

    # NOTE(tr3buchet): this only gets the first fixed_ip
    # won't find floating ips associated with other fixed_ips
    floating_ips = floating_ip_get_by_fixed_address(context,
                                                    fixed_ips[0]['address'])
    if not floating_ips:
        return None
    # NOTE(vish): this just returns the first floating ip
    return floating_ips[0]['address']
1749
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
    """Return instances stuck in 'rebooting' longer than ``reboot_window``
    seconds."""
    # Convert the window length into the cutoff timestamp.
    reboot_window = (timeutils.utcnow() -
                     datetime.timedelta(seconds=reboot_window))

    if not session:
        session = get_session()

    results = session.query(models.Instance).\
            filter(models.Instance.updated_at <= reboot_window).\
            filter_by(task_state="rebooting").all()

    return results
1765
@require_context
def instance_test_and_set(context, instance_uuid, attr, ok_states,
                          new_state, session=None):
    """Atomically check if an instance is in a valid state, and if it is, set
    the instance into a new state.

    :raises: InvalidUUID for a malformed uuid, InstanceInvalidState when the
             current value of ``attr`` is not in ``ok_states``.
    """
    if not session:
        session = get_session()

    with session.begin():
        query = model_query(context, models.Instance, session=session,
                            project_only=True)

        if utils.is_uuid_like(instance_uuid):
            query = query.filter_by(uuid=instance_uuid)
        else:
            raise exception.InvalidUUID(instance_uuid)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        instance = query.with_lockmode('update').first()

        state = instance[attr]
        if state not in ok_states:
            raise exception.InstanceInvalidState(
                attr=attr,
                instance_uuid=instance['uuid'],
                state=state,
                method='instance_test_and_set')

        instance[attr] = new_state
        instance.save(session=session)
1799
@require_context
def instance_update(context, instance_uuid, values):
    """Update an instance and return the updated reference."""
    instance_ref = _instance_update(context, instance_uuid, values)[1]
    return instance_ref


@require_context
def instance_update_and_get_original(context, instance_uuid, values):
    """Set the given properties on an instance and update it. Return
    a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: = request context object
    :param instance_uuid: = instance uuid
    :param values: = dict containing column values

    If "expected_task_state" exists in values, the update can only happen
    when the task state before update matches expected_task_state. Otherwise
    a UnexpectedTaskStateError is thrown.

    :returns: a tuple of the form (old_instance_ref, new_instance_ref)

    Raises NotFound if instance does not exist.
    """
    return _instance_update(context, instance_uuid, values,
                            copy_old_instance=True)
1826
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
    """Shared implementation behind instance_update*.

    :param copy_old_instance: when True, return a shallow copy of the row
                              as it was before the update.
    :returns: (old_instance_ref_or_None, updated_instance_ref)
    """
    session = get_session()

    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    with session.begin():
        instance_ref = instance_get_by_uuid(context, instance_uuid,
                                            session=session)
        if "expected_task_state" in values:
            # it is not a db column so always pop out
            expected = values.pop("expected_task_state")
            if not isinstance(expected, (tuple, list, set)):
                expected = (expected,)
            actual_state = instance_ref["task_state"]
            if actual_state not in expected:
                raise exception.UnexpectedTaskStateError(actual=actual_state,
                                                         expected=expected)

        if copy_old_instance:
            old_instance_ref = copy.copy(instance_ref)
        else:
            old_instance_ref = None

        # Metadata dicts are maintained through their own helpers rather
        # than plain column updates.
        metadata = values.get('metadata')
        if metadata is not None:
            instance_metadata_update(context, instance_ref['uuid'],
                                     values.pop('metadata'), True,
                                     session=session)

        system_metadata = values.get('system_metadata')
        if system_metadata is not None:
            instance_system_metadata_update(
                 context, instance_ref['uuid'], values.pop('system_metadata'),
                 delete=True, session=session)

        instance_ref.update(values)
        instance_ref.save(session=session)

    return (old_instance_ref, instance_ref)
1868
def instance_add_security_group(context, instance_uuid, security_group_id):
    """Associate the given security group with the given instance"""
    session = get_session()
    with session.begin():
        instance_ref = instance_get_by_uuid(context, instance_uuid,
                                            session=session)
        security_group_ref = security_group_get(context,
                                                security_group_id,
                                                session=session)
        instance_ref.security_groups += [security_group_ref]
        instance_ref.save(session=session)


@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
    """Disassociate the given security group from the given instance"""
    session = get_session()
    instance_ref = instance_get_by_uuid(context, instance_uuid,
                                        session=session)
    session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_uuid=instance_ref['uuid']).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
1899
@require_context
def instance_info_cache_create(context, values):
    """Create a new instance cache record in the table.

    :param context: = request context object
    :param values: = dict containing column values
    """
    info_cache = models.InstanceInfoCache()
    info_cache.update(values)

    session = get_session()
    with session.begin():
        info_cache.save(session=session)
    return info_cache


@require_context
def instance_info_cache_get(context, instance_uuid, session=None):
    """Gets an instance info cache from the table.

    :param instance_uuid: = uuid of the info cache's instance
    :param session: = optional session object
    """
    session = session or get_session()

    info_cache = session.query(models.InstanceInfoCache).\
                         filter_by(instance_uuid=instance_uuid).\
                         first()
    return info_cache


@require_context
def instance_info_cache_update(context, instance_uuid, values,
                               session=None):
    """Update an instance info cache record in the table.

    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update
    :param session: = optional session object
    """
    session = session or get_session()
    info_cache = instance_info_cache_get(context, instance_uuid,
                                         session=session)
    if info_cache:
        # NOTE(tr3buchet): let's leave it alone if it's already deleted
        if info_cache['deleted']:
            return info_cache

        info_cache.update(values)
        info_cache.save(session=session)
    else:
        # NOTE(tr3buchet): just in case someone blows away an instance's
        #                  cache entry, re-create it on the fly
        values['instance_uuid'] = instance_uuid
        info_cache = instance_info_cache_create(context, values)

    return info_cache


@require_context
def instance_info_cache_delete(context, instance_uuid, session=None):
    """Deletes an existing instance_info_cache record

    :param instance_uuid: = uuid of the instance tied to the cache record
    :param session: = optional session object
    """
    values = {'deleted': True,
              'deleted_at': timeutils.utcnow()}
    instance_info_cache_update(context, instance_uuid, values, session)
1973
@require_context
def key_pair_create(context, values):
    """Create a key pair row from the given column values."""
    key_pair_ref = models.KeyPair()
    key_pair_ref.update(values)
    key_pair_ref.save()
    return key_pair_ref


@require_context
def key_pair_destroy(context, user_id, name):
    """Hard-delete one of a user's key pairs by name."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        key_pair_ref = key_pair_get(context, user_id, name, session=session)
        key_pair_ref.delete(session=session)


@require_context
def key_pair_destroy_all_by_user(context, user_id):
    """Soft-delete all key pairs owned by a user."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def key_pair_get(context, user_id, name, session=None):
    """Fetch a user's key pair by name; raises KeypairNotFound."""
    authorize_user_context(context, user_id)
    result = model_query(context, models.KeyPair, session=session).\
                     filter_by(user_id=user_id).\
                     filter_by(name=name).\
                     first()
    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return result


@require_context
def key_pair_get_all_by_user(context, user_id):
    """Return all non-deleted key pairs owned by a user."""
    authorize_user_context(context, user_id)
    return model_query(context, models.KeyPair, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   all()


@require_context
def key_pair_count_by_user(context, user_id):
    """Count a user's non-deleted key pairs."""
    authorize_user_context(context, user_id)
    return model_query(context, models.KeyPair, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   count()
2033
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
    """Associate a project with a network.

    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter, id=None):
            # Query one network locked for update, optionally pinned to an id.
            filter_kwargs = {'project_id': project_filter}
            if id is not None:
                filter_kwargs['id'] = id
            return model_query(context, models.Network, session=session,
                               read_deleted="no").\
                           filter_by(**filter_kwargs).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network

            # get a free network (project_id None)
            network_ref = network_query(None, network_id)
            if not network_ref:
                raise db.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref
2083
@require_admin_context
def network_count(context):
    """Count all network rows visible to the context."""
    return model_query(context, models.Network).count()


@require_admin_context
def _network_ips_query(context, network_id):
    # Base query for non-deleted fixed IPs on one network.
    return model_query(context, models.FixedIp, read_deleted="no").\
                   filter_by(network_id=network_id)


@require_admin_context
def network_count_reserved_ips(context, network_id):
    """Count the reserved fixed IPs on a network."""
    return _network_ips_query(context, network_id).\
                    filter_by(reserved=True).\
                    count()
2101
@require_admin_context
def network_create_safe(context, values):
    """Create a network row, returning None on a duplicate instead of
    raising.

    :raises: DuplicateVlan when a non-deleted network already uses the vlan.
    """
    if values.get('vlan'):
        if model_query(context, models.Network, read_deleted="no")\
                      .filter_by(vlan=values['vlan'])\
                      .first():
            raise exception.DuplicateVlan(vlan=values['vlan'])

    network_ref = models.Network()
    network_ref['uuid'] = str(utils.gen_uuid())
    network_ref.update(values)

    try:
        network_ref.save()
        return network_ref
    except IntegrityError:
        # Best-effort create: a concurrent insert won the race.
        return None
2120
@require_admin_context
def network_delete_safe(context, network_id):
    """Delete a network unless it still has allocated fixed IPs.

    :raises: NetworkInUse when any non-deleted fixed IP is still allocated.
    """
    session = get_session()
    with session.begin():
        result = session.query(models.FixedIp).\
                         filter_by(network_id=network_id).\
                         filter_by(deleted=False).\
                         filter_by(allocated=True).\
                         count()
        if result != 0:
            raise exception.NetworkInUse(network_id=network_id)
        network_ref = network_get(context, network_id=network_id,
                                  session=session)
        # Soft-delete the network's remaining fixed IPs, then remove the
        # network row itself.
        session.query(models.FixedIp).\
                filter_by(network_id=network_id).\
                filter_by(deleted=False).\
                update({'deleted': True,
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()})
        session.delete(network_ref)
2142
@require_admin_context
def network_disassociate(context, network_id):
    """Detach a network from its project and host."""
    network_update(context, network_id, {'project_id': None,
                                         'host': None})


@require_context
def network_get(context, network_id, session=None, project_only='allow_none'):
    """Fetch one network by id; raises NetworkNotFound if absent."""
    result = model_query(context, models.Network, session=session,
                         project_only=project_only).\
                    filter_by(id=network_id).\
                    first()

    if not result:
        raise exception.NetworkNotFound(network_id=network_id)

    return result


@require_context
def network_get_all(context):
    """Return all non-deleted networks; raises NoNetworksFound when empty."""
    result = model_query(context, models.Network, read_deleted="no").all()

    if not result:
        raise exception.NoNetworksFound()

    return result
2172
@require_context
def network_get_all_by_uuids(context, network_uuids,
                             project_only="allow_none"):
    """Return the networks matching ``network_uuids``.

    :raises: NoNetworksFound when nothing matches; NetworkNotFound /
             NetworkNotFoundForProject when any requested uuid is missing
             from the result.
    """
    result = model_query(context, models.Network, read_deleted="no",
                         project_only=project_only).\
                filter(models.Network.uuid.in_(network_uuids)).\
                all()

    if not result:
        raise exception.NoNetworksFound()

    # check if the result contains all the networks
    # we are looking for
    for network_uuid in network_uuids:
        found = False
        for network in result:
            if network['uuid'] == network_uuid:
                found = True
                break
        if not found:
            if project_only:
                raise exception.NetworkNotFoundForProject(
                    network_uuid=network_uuid, project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result
2198
# NOTE(vish): pylint complains because of the long method name, but
#             it fits with the names of the rest of the methods
# pylint: disable=C0103


@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
    # FIXME(sirp): since this returns fixed_ips, this would be better named
    # fixed_ip_get_all_by_network.
    # NOTE(vish): The ugly joins here are to solve a performance issue and
    #             should be removed once we can add and remove leases
    #             without regenerating the whole list
    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == False)
    inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
                    models.Instance.deleted == False)
    session = get_session()
    query = session.query(models.FixedIp.address,
                          models.FixedIp.instance_uuid,
                          models.FixedIp.network_id,
                          models.FixedIp.virtual_interface_id,
                          models.VirtualInterface.address,
                          models.Instance.hostname,
                          models.Instance.updated_at,
                          models.Instance.created_at).\
                          filter(models.FixedIp.deleted == False).\
                          filter(models.FixedIp.network_id == network_id).\
                          filter(models.FixedIp.allocated == True).\
                          join((models.VirtualInterface, vif_and)).\
                          join((models.Instance, inst_and)).\
                          filter(models.FixedIp.instance_uuid != None).\
                          filter(models.FixedIp.virtual_interface_id != None)
    if host:
        query = query.filter(models.Instance.host == host)
    result = query.all()
    data = []
    # Re-shape each column tuple into a dict keyed by consumer-friendly names.
    for datum in result:
        cleaned = {}
        cleaned['address'] = datum[0]
        cleaned['instance_uuid'] = datum[1]
        cleaned['network_id'] = datum[2]
        cleaned['vif_id'] = datum[3]
        cleaned['vif_address'] = datum[4]
        cleaned['instance_hostname'] = datum[5]
        cleaned['instance_updated'] = datum[6]
        cleaned['instance_created'] = datum[7]
        data.append(cleaned)
    return data
2249
@require_admin_context
def _network_get_query(context, session=None):
    # Base query over non-deleted networks.
    return model_query(context, models.Network, session=session,
                       read_deleted="no")


@require_admin_context
def network_get_by_bridge(context, bridge):
    """Fetch the network using ``bridge``; raises NetworkNotFoundForBridge."""
    result = _network_get_query(context).filter_by(bridge=bridge).first()

    if not result:
        raise exception.NetworkNotFoundForBridge(bridge=bridge)

    return result


@require_admin_context
def network_get_by_uuid(context, uuid):
    """Fetch the network with ``uuid``; raises NetworkNotFoundForUUID."""
    result = _network_get_query(context).filter_by(uuid=uuid).first()

    if not result:
        raise exception.NetworkNotFoundForUUID(uuid=uuid)

    return result


@require_admin_context
def network_get_by_cidr(context, cidr):
    """Fetch the network whose v4 or v6 CIDR matches ``cidr``."""
    result = _network_get_query(context).\
                filter(or_(models.Network.cidr == cidr,
                           models.Network.cidr_v6 == cidr)).\
                first()

    if not result:
        raise exception.NetworkNotFoundForCidr(cidr=cidr)

    return result
2288
@require_admin_context
def network_get_by_instance(context, instance_id):
    # note this uses fixed IP to get to instance
    # only works for networks the instance has an IP from
    result = _network_get_query(context).\
                 filter_by(instance_id=instance_id).\
                 first()

    if not result:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)

    return result


@require_admin_context
def network_get_all_by_instance(context, instance_id):
    """Return all networks associated with an instance."""
    result = _network_get_query(context).\
                 filter_by(instance_id=instance_id).\
                 all()

    if not result:
        raise exception.NetworkNotFoundForInstance(instance_id=instance_id)

    return result


@require_admin_context
def network_get_all_by_host(context, host):
    """Return networks owned by ``host`` or holding a fixed IP on it."""
    session = get_session()
    fixed_ip_query = model_query(context, models.FixedIp.network_id,
                                 session=session).\
                         filter(models.FixedIp.host == host)
    # NOTE(vish): return networks that have host set
    #             or that have a fixed ip with host set
    host_filter = or_(models.Network.host == host,
                      models.Network.id.in_(fixed_ip_query.subquery()))
    return _network_get_query(context, session=session).\
                       filter(host_filter).\
                       all()
2329
@require_admin_context
def network_set_host(context, network_id, host_id):
    """Atomically claim a network for ``host_id`` if it has no host yet.

    :returns: the network's host after the operation (the existing host when
              one was already set).
    """
    session = get_session()
    with session.begin():
        network_ref = _network_get_query(context, session=session).\
                              filter_by(id=network_id).\
                              with_lockmode('update').\
                              first()

        if not network_ref:
            raise exception.NetworkNotFound(network_id=network_id)

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not network_ref['host']:
            network_ref['host'] = host_id
            session.add(network_ref)

    return network_ref['host']


@require_context
def network_update(context, network_id, values):
    """Apply ``values`` to a network row and return the updated reference."""
    session = get_session()
    with session.begin():
        network_ref = network_get(context, network_id, session=session)
        network_ref.update(values)
        network_ref.save(session=session)
        return network_ref
2363
@require_admin_context
def iscsi_target_count_by_host(context, host):
    """Count iSCSI targets on ``host``."""
    return model_query(context, models.IscsiTarget).\
                   filter_by(host=host).\
                   count()


@require_admin_context
def iscsi_target_create_safe(context, values):
    """Create an iSCSI target row, returning None on a duplicate instead
    of raising."""
    iscsi_target_ref = models.IscsiTarget()

    for (key, value) in values.iteritems():
        iscsi_target_ref[key] = value
    try:
        iscsi_target_ref.save()
        return iscsi_target_ref
    except IntegrityError:
        # Best-effort create: a concurrent insert won the race.
        return None
2387
def quota_get(context, project_id, resource, session=None):
2388
result = model_query(context, models.Quota, session=session,
2389
read_deleted="no").\
2390
filter_by(project_id=project_id).\
2391
filter_by(resource=resource).\
2395
raise exception.ProjectQuotaNotFound(project_id=project_id)
2401
def quota_get_all_by_project(context, project_id):
2402
authorize_project_context(context, project_id)
2404
rows = model_query(context, models.Quota, read_deleted="no").\
2405
filter_by(project_id=project_id).\
2408
result = {'project_id': project_id}
2410
result[row.resource] = row.hard_limit
2415
@require_admin_context
2416
def quota_create(context, project_id, resource, limit):
2417
quota_ref = models.Quota()
2418
quota_ref.project_id = project_id
2419
quota_ref.resource = resource
2420
quota_ref.hard_limit = limit
2425
@require_admin_context
def quota_update(context, project_id, resource, limit):
    """Set a new hard limit on an existing quota row."""
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        quota_ref.save(session=session)
2434
@require_admin_context
def quota_destroy(context, project_id, resource):
    """Soft-delete the quota row for (project_id, resource)."""
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.delete(session=session)
2446
def quota_class_get(context, class_name, resource, session=None):
    """Return the quota-class row for (class_name, resource) or raise.

    :raises: exception.QuotaClassNotFound if no row exists.
    """
    result = model_query(context, models.QuotaClass, session=session,
                         read_deleted="no").\
                     filter_by(class_name=class_name).\
                     filter_by(resource=resource).\
                     first()

    if not result:
        raise exception.QuotaClassNotFound(class_name=class_name)

    return result
2460
def quota_class_get_all_by_name(context, class_name):
    """Return {'class_name': ..., resource: hard_limit, ...} for a class."""
    authorize_quota_class_context(context, class_name)

    rows = model_query(context, models.QuotaClass, read_deleted="no").\
                   filter_by(class_name=class_name).\
                   all()

    result = {'class_name': class_name}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result
2474
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
    """Create and return a quota-class row."""
    quota_class_ref = models.QuotaClass()
    quota_class_ref.class_name = class_name
    quota_class_ref.resource = resource
    quota_class_ref.hard_limit = limit
    quota_class_ref.save()
    return quota_class_ref
2484
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
    """Set a new hard limit on an existing quota-class row."""
    session = get_session()
    with session.begin():
        quota_class_ref = quota_class_get(context, class_name, resource,
                                          session=session)
        quota_class_ref.hard_limit = limit
        quota_class_ref.save(session=session)
2494
@require_admin_context
def quota_class_destroy(context, class_name, resource):
    """Soft-delete the quota-class row for (class_name, resource)."""
    session = get_session()
    with session.begin():
        quota_class_ref = quota_class_get(context, class_name, resource,
                                          session=session)
        quota_class_ref.delete(session=session)
2503
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
    """Soft-delete every quota row belonging to a quota class."""
    session = get_session()
    with session.begin():
        quota_classes = model_query(context, models.QuotaClass,
                                    session=session, read_deleted="no").\
                                filter_by(class_name=class_name).\
                                all()

        for quota_class_ref in quota_classes:
            quota_class_ref.delete(session=session)
2520
def quota_usage_get(context, project_id, resource, session=None):
    """Return the usage row for (project_id, resource) or raise.

    :raises: exception.QuotaUsageNotFound if no row exists.
    """
    result = model_query(context, models.QuotaUsage, session=session,
                         read_deleted="no").\
                     filter_by(project_id=project_id).\
                     filter_by(resource=resource).\
                     first()

    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return result
2534
def quota_usage_get_all_by_project(context, project_id):
    """Return {resource: {'in_use': x, 'reserved': y}} for a project."""
    authorize_project_context(context, project_id)

    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()

    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)

    return result
2548
@require_admin_context
def quota_usage_create(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Create and return a quota-usage row.

    ``session`` may be supplied so the insert joins a caller's transaction.
    """
    quota_usage_ref = models.QuotaUsage()
    quota_usage_ref.project_id = project_id
    quota_usage_ref.resource = resource
    quota_usage_ref.in_use = in_use
    quota_usage_ref.reserved = reserved
    quota_usage_ref.until_refresh = until_refresh
    quota_usage_ref.save(session=session)

    return quota_usage_ref
2562
@require_admin_context
def quota_usage_update(context, project_id, resource, in_use, reserved,
                       until_refresh, session=None):
    """Overwrite the counters of an existing quota-usage row.

    If ``session`` is given, the caller is assumed to own the transaction;
    otherwise a new session/transaction is created here.
    """
    def do_update(session):
        # Single helper so both transaction styles share one body.
        quota_usage_ref = quota_usage_get(context, project_id, resource,
                                          session=session)
        quota_usage_ref.in_use = in_use
        quota_usage_ref.reserved = reserved
        quota_usage_ref.until_refresh = until_refresh
        quota_usage_ref.save(session=session)

    if session:
        # Assume caller started a transaction
        do_update(session)
    else:
        session = get_session()
        with session.begin():
            do_update(session)
2582
@require_admin_context
def quota_usage_destroy(context, project_id, resource):
    """Soft-delete the quota-usage row for (project_id, resource)."""
    session = get_session()
    with session.begin():
        quota_usage_ref = quota_usage_get(context, project_id, resource,
                                          session=session)
        quota_usage_ref.delete(session=session)
2595
def reservation_get(context, uuid, session=None):
    """Return the reservation identified by *uuid* or raise.

    :raises: exception.ReservationNotFound if no row exists.
    """
    result = model_query(context, models.Reservation, session=session,
                         read_deleted="no").\
                     filter_by(uuid=uuid).\
                     first()

    if not result:
        raise exception.ReservationNotFound(uuid=uuid)

    return result
2608
def reservation_get_all_by_project(context, project_id):
    """Return {resource: {reservation_uuid: delta}} for a project.

    NOTE: this must query Reservation rows — the loop below reads
    ``row.uuid`` and ``row.delta``, which QuotaUsage rows do not have,
    so querying QuotaUsage here would raise AttributeError.
    """
    authorize_project_context(context, project_id)

    rows = model_query(context, models.Reservation, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()

    result = {'project_id': project_id}
    for row in rows:
        result.setdefault(row.resource, {})
        result[row.resource][row.uuid] = row.delta

    return result
2623
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
                       expire, session=None):
    """Create and return a reservation row bound to a usage record."""
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage['id']
    reservation_ref.project_id = project_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.save(session=session)
    return reservation_ref
2637
@require_admin_context
def reservation_destroy(context, uuid):
    """Soft-delete the reservation identified by *uuid*."""
    session = get_session()
    with session.begin():
        reservation_ref = reservation_get(context, uuid, session=session)
        reservation_ref.delete(session=session)
2648
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
2653
def _get_quota_usages(context, session):
    """Return {resource: QuotaUsage} for the project, rows locked FOR UPDATE."""
    # Broken out for testability
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no",
                       session=session).\
                   filter_by(project_id=context.project_id).\
                   with_lockmode('update').\
                   all()
    return dict((row.resource, row) for row in rows)
2665
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age):
    """Check quotas and create reservations for the requested deltas.

    :param resources: dict of resource name -> resource object (with .sync)
    :param quotas: dict of resource name -> hard limit (-1 == unlimited)
    :param deltas: dict of resource name -> requested change
    :param expire: expiration time for the created reservations
    :param until_refresh: countdown of reservations until usage is refreshed
    :param max_age: seconds between subsequent usage refreshes
    :returns: list of reservation uuids
    :raises: exception.OverQuota if any positive delta exceeds its quota
    """
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        # Get the current usages
        usages = _get_quota_usages(context, session)

        # Handle usage refresh
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            refresh = False
            if resource not in usages:
                usages[resource] = quota_usage_create(elevated,
                                                      context.project_id,
                                                      resource,
                                                      0, 0,
                                                      until_refresh or None,
                                                      session=session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and (usages[resource].updated_at -
                              timeutils.utcnow()).seconds >= max_age:
                refresh = True

            # OK, refresh the usage
            if refresh:
                # Grab the sync routine
                sync = resources[resource].sync

                updates = sync(elevated, context.project_id, session)
                for res, in_use in updates.items():
                    # Make sure we have a destination for the usage!
                    if res not in usages:
                        usages[res] = quota_usage_create(elevated,
                                                         context.project_id,
                                                         res,
                                                         0, 0,
                                                         until_refresh or None,
                                                         session=session)

                    # Update the usage
                    usages[res].in_use = in_use
                    usages[res].until_refresh = until_refresh or None

                    # Because more than one resource may be refreshed
                    # by the call to the sync routine, and we don't
                    # want to double-sync, we make sure all refreshed
                    # resources are dropped from the work set.
                    work.discard(res)

                    # NOTE(Vek): We make the assumption that the sync
                    #            routine actually refreshes the
                    #            resources that it is the sync routine
                    #            for.  We don't check, because this is
                    #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [resource for resource, delta in deltas.items()
                  if delta < 0 and
                  delta + usages[resource].in_use < 0]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [resource for resource, delta in deltas.items()
                 if quotas[resource] >= 0 and delta >= 0 and
                 quotas[resource] < delta + usages[resource].total]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = reservation_create(elevated,
                                                 str(utils.gen_uuid()),
                                                 usages[resource],
                                                 context.project_id,
                                                 resource, delta, expire,
                                                 session=session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

        # Apply updates to the usages table
        for usage_ref in usages.values():
            usage_ref.save(session=session)

    if unders:
        LOG.warning(_("Change will make usage less than 0 for the following "
                      "resources: %(unders)s") % locals())
    if overs:
        usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                      for k, v in usages.items())
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)

    return reservations
2796
def _quota_reservations(session, context, reservations):
    """Return the relevant reservations, locked FOR UPDATE."""

    # Get the listed reservations
    return model_query(context, models.Reservation,
                       read_deleted="no",
                       session=session).\
                   filter(models.Reservation.uuid.in_(reservations)).\
                   with_lockmode('update').\
                   all()
2809
def reservation_commit(context, reservations):
    """Commit reservations: move reserved quantities into in_use.

    Positive deltas are released from ``reserved`` (they were added at
    reserve time); all deltas are applied to ``in_use``.
    """
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta
            usage.in_use += reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2827
def reservation_rollback(context, reservations):
    """Roll back reservations: release reserved quantities, leave in_use."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(context, session)

        for reservation in _quota_reservations(session, context, reservations):
            usage = usages[reservation.resource]
            if reservation.delta >= 0:
                usage.reserved -= reservation.delta

            reservation.delete(session=session)

        for usage in usages.values():
            usage.save(session=session)
2843
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Soft-delete all quota, usage and reservation rows of a project."""
    session = get_session()
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
                         filter_by(project_id=project_id).\
                         all()

        for quota_ref in quotas:
            quota_ref.delete(session=session)

        quota_usages = model_query(context, models.QuotaUsage,
                                   session=session, read_deleted="no").\
                               filter_by(project_id=project_id).\
                               all()

        for quota_usage_ref in quota_usages:
            quota_usage_ref.delete(session=session)

        reservations = model_query(context, models.Reservation,
                                   session=session, read_deleted="no").\
                               filter_by(project_id=project_id).\
                               all()

        for reservation_ref in reservations:
            reservation_ref.delete(session=session)
2872
@require_admin_context
def reservation_expire(context):
    """Release and delete every reservation whose expire time has passed."""
    session = get_session()
    with session.begin():
        current_time = timeutils.utcnow()
        results = model_query(context, models.Reservation, session=session,
                              read_deleted="no").\
                          filter(models.Reservation.expire < current_time).\
                          all()

        for reservation in results:
            # Positive deltas were added to 'reserved' when created;
            # give that quantity back before dropping the reservation.
            if reservation.delta >= 0:
                reservation.usage.reserved -= reservation.delta
                reservation.usage.save(session=session)

            reservation.delete(session=session)
2894
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    """Claim a free iSCSI target on *host* for a volume; return target_num.

    :raises: db.NoMoreTargets when every target on the host is taken.
    """
    session = get_session()
    with session.begin():
        iscsi_target_ref = model_query(context, models.IscsiTarget,
                                       session=session, read_deleted="no").\
                                filter_by(volume=None).\
                                filter_by(host=host).\
                                with_lockmode('update').\
                                first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not iscsi_target_ref:
            raise db.NoMoreTargets()

        iscsi_target_ref.volume_id = volume_id
        session.add(iscsi_target_ref)

    return iscsi_target_ref.target_num
2916
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, mountpoint):
    """Mark a volume as attached to an instance at *mountpoint*."""
    if not utils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'in-use'
        volume_ref['mountpoint'] = mountpoint
        volume_ref['attach_status'] = 'attached'
        volume_ref['instance_uuid'] = instance_uuid
        volume_ref['attach_time'] = timeutils.utcnow()
        volume_ref.save(session=session)
2933
def volume_create(context, values):
    """Create a volume row (generating a uuid id if absent) and return it."""
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    volume_ref = models.Volume()
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        volume_ref.save(session=session)

    # Re-read so joined metadata/type relations are populated.
    return volume_get(context, values['id'], session=session)
2948
@require_admin_context
def volume_data_get_for_project(context, project_id, session=None):
    """Return (volume_count, total_gigabytes) for a project."""
    result = model_query(context,
                         func.count(models.Volume.id),
                         func.sum(models.Volume.size),
                         read_deleted="no",
                         session=session).\
                     filter_by(project_id=project_id).\
                     first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)
2962
@require_admin_context
def volume_destroy(context, volume_id):
    """Soft-delete a volume and its metadata; free its iSCSI target."""
    session = get_session()
    with session.begin():
        # Raises VolumeNotFound if the volume doesn't exist.
        volume_ref = volume_get(context, volume_id, session=session)
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.VolumeMetadata).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
2983
@require_admin_context
def volume_detached(context, volume_id):
    """Mark a volume as detached, clearing all attachment state."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'available'
        volume_ref['mountpoint'] = None
        volume_ref['attach_status'] = 'detached'
        volume_ref['instance_uuid'] = None
        volume_ref['attach_time'] = None
        volume_ref.save(session=session)
2997
def _volume_get_query(context, session=None, project_only=False):
    """Base volume query with metadata and volume_type eagerly loaded."""
    return model_query(context, models.Volume, session=session,
                       project_only=project_only).\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type'))
3005
def _ec2_volume_get_query(context, session=None):
    """Base query over the ec2 volume-id mapping table (deleted included)."""
    return model_query(context, models.VolumeIdMapping,
                       session=session, read_deleted='yes')
3011
def _ec2_snapshot_get_query(context, session=None):
    """Base query over the ec2 snapshot-id mapping table (deleted included)."""
    return model_query(context, models.SnapshotIdMapping,
                       session=session, read_deleted='yes')
3017
def volume_get(context, volume_id, session=None):
    """Return a volume by id or raise VolumeNotFound."""
    result = _volume_get_query(context, session=session, project_only=True).\
                    filter_by(id=volume_id).\
                    first()

    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result
3028
@require_admin_context
def volume_get_all(context):
    """Return every volume."""
    return _volume_get_query(context).all()
3033
@require_admin_context
def volume_get_all_by_host(context, host):
    """Return every volume hosted on *host*."""
    return _volume_get_query(context).filter_by(host=host).all()
3038
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
    """Return all volumes attached to the given instance ([] if none)."""
    result = model_query(context, models.Volume, read_deleted="no").\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type')).\
                     filter_by(instance_uuid=instance_uuid).\
                     all()

    if not result:
        return []

    return result
3053
def volume_get_all_by_project(context, project_id):
    """Return every volume owned by *project_id*."""
    authorize_project_context(context, project_id)
    return _volume_get_query(context).filter_by(project_id=project_id).all()
3058
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    """Return the iSCSI target number assigned to a volume or raise."""
    result = model_query(context, models.IscsiTarget, read_deleted="yes").\
                     filter_by(volume_id=volume_id).\
                     first()

    if not result:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)

    return result.target_num
3071
def volume_update(context, volume_id, values):
    """Update a volume; a 'metadata' key in values replaces its metadata."""
    session = get_session()
    volume_ref = volume_get(context, volume_id, session=session)
    metadata = values.get('metadata')
    if metadata is not None:
        volume_metadata_update(context,
                               volume_id,
                               values.pop('metadata'),
                               delete=True)
    with session.begin():
        volume_ref.update(values)
        volume_ref.save(session=session)
3088
def ec2_volume_create(context, volume_uuid, id=None):
    """Create an ec2-compatible volume-id mapping for the given uuid.

    If *id* is given it is used as the ec2 integer id; otherwise the
    database assigns one.
    """
    ec2_volume_ref = models.VolumeIdMapping()
    ec2_volume_ref.update({'uuid': volume_uuid})
    if id is not None:
        ec2_volume_ref.update({'id': id})

    ec2_volume_ref.save()

    return ec2_volume_ref
3101
def get_ec2_volume_id_by_uuid(context, volume_id, session=None):
    """Map a volume uuid to its integer ec2 id or raise VolumeNotFound."""
    result = _ec2_volume_get_query(context, session=session).\
                    filter_by(uuid=volume_id).\
                    first()

    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result['id']
3113
def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an integer ec2 id to its volume uuid or raise VolumeNotFound."""
    result = _ec2_volume_get_query(context, session=session).\
                    filter_by(id=ec2_id).\
                    first()

    if not result:
        raise exception.VolumeNotFound(volume_id=ec2_id)

    return result['uuid']
3125
def ec2_snapshot_create(context, snapshot_uuid, id=None):
    """Create an ec2-compatible snapshot-id mapping for the given uuid."""
    ec2_snapshot_ref = models.SnapshotIdMapping()
    ec2_snapshot_ref.update({'uuid': snapshot_uuid})
    if id is not None:
        ec2_snapshot_ref.update({'id': id})

    ec2_snapshot_ref.save()

    return ec2_snapshot_ref
3138
def get_ec2_snapshot_id_by_uuid(context, snapshot_id, session=None):
    """Map a snapshot uuid to its integer ec2 id or raise SnapshotNotFound."""
    result = _ec2_snapshot_get_query(context, session=session).\
                    filter_by(uuid=snapshot_id).\
                    first()

    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return result['id']
3150
def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
    """Map an integer ec2 id to its snapshot uuid or raise SnapshotNotFound."""
    result = _ec2_snapshot_get_query(context, session=session).\
                    filter_by(id=ec2_id).\
                    first()

    if not result:
        raise exception.SnapshotNotFound(snapshot_id=ec2_id)

    return result['uuid']
3161
####################
3163
def _volume_metadata_get_query(context, volume_id, session=None):
    """Base query over a volume's non-deleted metadata rows."""
    return model_query(context, models.VolumeMetadata,
                       session=session, read_deleted="no").\
                    filter_by(volume_id=volume_id)
3170
@require_volume_exists
def volume_metadata_get(context, volume_id):
    """Return a volume's metadata as a plain {key: value} dict."""
    rows = _volume_metadata_get_query(context, volume_id).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result
3181
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete one metadata key of a volume."""
    _volume_metadata_get_query(context, volume_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
3191
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    """Return one metadata row of a volume or raise VolumeMetadataNotFound."""
    result = _volume_metadata_get_query(context, volume_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)

    return result
3204
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    """Set a volume's metadata to *metadata*.

    When *delete* is true, keys absent from *metadata* are soft-deleted;
    otherwise existing keys are only updated/added.
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound:
            # No existing row: create one, recording key and owner too.
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
3243
def snapshot_create(context, values):
    """Create a snapshot row (generating a uuid id if absent) and return it."""
    snapshot_ref = models.Snapshot()
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
    return snapshot_ref
3255
@require_admin_context
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot row."""
    session = get_session()
    with session.begin():
        session.query(models.Snapshot).\
            filter_by(id=snapshot_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
3267
def snapshot_get(context, snapshot_id, session=None):
    """Return a snapshot by id or raise SnapshotNotFound."""
    result = model_query(context, models.Snapshot, session=session,
                         project_only=True).\
                    filter_by(id=snapshot_id).\
                    first()

    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return result
3279
@require_admin_context
def snapshot_get_all(context):
    """Return every snapshot."""
    return model_query(context, models.Snapshot).all()
3285
def snapshot_get_all_for_volume(context, volume_id):
    """Return all non-deleted snapshots of a volume."""
    return model_query(context, models.Snapshot, read_deleted='no',
                       project_only=True).\
                   filter_by(volume_id=volume_id).all()
3292
def snapshot_get_all_by_project(context, project_id):
    """Return every snapshot owned by *project_id*."""
    authorize_project_context(context, project_id)
    return model_query(context, models.Snapshot).\
                   filter_by(project_id=project_id).\
                   all()
3300
def snapshot_update(context, snapshot_id, values):
    """Update a snapshot row with *values*."""
    session = get_session()
    with session.begin():
        snapshot_ref = snapshot_get(context, snapshot_id, session=session)
        snapshot_ref.update(values)
        snapshot_ref.save(session=session)
3311
def _block_device_mapping_get_query(context, session=None):
    """Base query over block device mapping rows."""
    return model_query(context, models.BlockDeviceMapping, session=session)
3316
def block_device_mapping_create(context, values):
    """Persist a new block device mapping row."""
    bdm_ref = models.BlockDeviceMapping()
    bdm_ref.update(values)

    session = get_session()
    with session.begin():
        bdm_ref.save(session=session)
3326
def block_device_mapping_update(context, bdm_id, values):
    """Apply *values* to the block device mapping with the given id."""
    session = get_session()
    with session.begin():
        _block_device_mapping_get_query(context, session=session).\
                filter_by(id=bdm_id).\
                update(values)
3335
def block_device_mapping_update_or_create(context, values):
    """Update the mapping for (instance_uuid, device_name) or create it.

    Also soft-deletes other mappings of the same swap/ephemeral virtual
    device name on the instance, so a virtual name maps to one device.
    """
    session = get_session()
    with session.begin():
        result = _block_device_mapping_get_query(context, session=session).\
                 filter_by(instance_uuid=values['instance_uuid']).\
                 filter_by(device_name=values['device_name']).\
                 first()
        if not result:
            bdm_ref = models.BlockDeviceMapping()
            bdm_ref.update(values)
            bdm_ref.save(session=session)
        else:
            result.update(values)

        # NOTE(yamahata): same virtual device name can be specified multiple
        #                 times. So delete the existing ones.
        virtual_name = values['virtual_name']
        if (virtual_name is not None and
            block_device.is_swap_or_ephemeral(virtual_name)):
            session.query(models.BlockDeviceMapping).\
                filter_by(instance_uuid=values['instance_uuid']).\
                filter_by(virtual_name=virtual_name).\
                filter(models.BlockDeviceMapping.device_name !=
                       values['device_name']).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3365
def block_device_mapping_get_all_by_instance(context, instance_uuid):
    """Return every block device mapping of an instance."""
    return _block_device_mapping_get_query(context).\
                   filter_by(instance_uuid=instance_uuid).\
                   all()
3372
def block_device_mapping_destroy(context, bdm_id):
    """Soft-delete the block device mapping with the given id."""
    session = get_session()
    with session.begin():
        session.query(models.BlockDeviceMapping).\
                filter_by(id=bdm_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3383
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
                                                        volume_id):
    """Soft-delete the mapping joining *instance_uuid* and *volume_id*."""
    session = get_session()
    with session.begin():
        _block_device_mapping_get_query(context, session=session).\
                filter_by(instance_uuid=instance_uuid).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3396
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
                                                        device_name):
    """Soft-delete the mapping joining *instance_uuid* and *device_name*."""
    session = get_session()
    with session.begin():
        _block_device_mapping_get_query(context, session=session).\
                filter_by(instance_uuid=instance_uuid).\
                filter_by(device_name=device_name).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3410
def _security_group_get_query(context, session=None, read_deleted=None,
                              project_only=False, join_rules=True):
    """Base security-group query; optionally eager-loads the rules."""
    query = model_query(context, models.SecurityGroup, session=session,
                        read_deleted=read_deleted, project_only=project_only)
    if join_rules:
        query = query.options(joinedload_all('rules'))
    return query
3419
def _security_group_get_by_names(context, session, project_id, group_names):
    """
    Get security group models for a project by a list of names.
    Raise SecurityGroupNotFoundForProject for a name not found.
    """
    query = _security_group_get_query(context, session=session,
                                      read_deleted="no", join_rules=False).\
            filter_by(project_id=project_id).\
            filter(models.SecurityGroup.name.in_(group_names))
    sg_models = query.all()
    if len(sg_models) == len(group_names):
        return sg_models
    # Find the first one missing and raise
    group_names_from_models = [x.name for x in sg_models]
    for group_name in group_names:
        if group_name not in group_names_from_models:
            raise exception.SecurityGroupNotFoundForProject(
                    project_id=project_id, security_group_id=group_name)
    # Not reached: the length mismatch guarantees a missing name above.
3441
def security_group_get_all(context):
    """Return every security group."""
    return _security_group_get_query(context).all()
3446
def security_group_get(context, security_group_id, session=None):
    """Return a security group by id (instances joined) or raise."""
    result = _security_group_get_query(context, session=session,
                                       project_only=True).\
                    filter_by(id=security_group_id).\
                    options(joinedload_all('instances')).\
                    first()

    if not result:
        raise exception.SecurityGroupNotFound(
                security_group_id=security_group_id)

    return result
3461
def security_group_get_by_name(context, project_id, group_name,
                               columns_to_join=None, session=None):
    """Return a project's security group by name or raise.

    ``columns_to_join`` selects relations to eager-load; defaults to
    instances and rules.
    """
    if session is None:
        session = get_session()

    query = _security_group_get_query(context, session=session,
                                      read_deleted="no", join_rules=False).\
            filter_by(project_id=project_id).\
            filter_by(name=group_name)

    if columns_to_join is None:
        columns_to_join = ['instances', 'rules']

    for column in columns_to_join:
        query = query.options(joinedload_all(column))

    result = query.first()
    if not result:
        raise exception.SecurityGroupNotFoundForProject(
                project_id=project_id, security_group_id=group_name)

    return result
3486
def security_group_get_by_project(context, project_id):
    """Return every non-deleted security group of a project."""
    return _security_group_get_query(context, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()
3493
def security_group_get_by_instance(context, instance_id):
    """Return every security group associated with an instance."""
    return _security_group_get_query(context, read_deleted="no").\
                   join(models.SecurityGroup.instances).\
                   filter_by(id=instance_id).\
                   all()
3501
def security_group_exists(context, project_id, group_name):
    """Return True if a project has a security group named *group_name*."""
    try:
        group = security_group_get_by_name(context, project_id, group_name)
        return group is not None
    except exception.NotFound:
        return False
3510
def security_group_in_use(context, group_id):
    """Return True if any live instance is associated with the group."""
    session = get_session()
    with session.begin():
        # Are there any instances that haven't been deleted
        # that include this group?
        inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=group_id).\
                filter_by(deleted=False).\
                all()
        for ia in inst_assoc:
            num_instances = session.query(models.Instance).\
                    filter_by(deleted=False).\
                    filter_by(uuid=ia.instance_uuid).\
                    count()
            if num_instances:
                return True

    return False
3531
def security_group_create(context, values, session=None):
    """Create and return a security group from *values*."""
    security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
    # once save() is called.  This will get cleaned up in next orm pass.
    security_group_ref.rules
    security_group_ref.update(values)
    if session is None:
        session = get_session()
    security_group_ref.save(session=session)
    return security_group_ref
3543
def security_group_ensure_default(context, session=None):
    """Ensure default security group exists for a project_id.

    Returns a tuple with the first element being a bool indicating
    if the default security group previously existed. Second
    element is the dict used to create the default security group.
    """
    try:
        default_group = security_group_get_by_name(context,
                context.project_id, 'default',
                columns_to_join=[], session=session)
        return (True, default_group)
    except exception.NotFound:
        values = {'name': 'default',
                  'description': 'default',
                  'user_id': context.user_id,
                  'project_id': context.project_id}
        default_group = security_group_create(context, values,
                                              session=session)
        return (False, default_group)
3566
def security_group_destroy(context, security_group_id):
    """Soft-delete a security group, its instance associations and rules.

    Rules are removed both where this group is the parent and where it is
    the grantee (group_id) of another group's rule.
    """
    session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                filter_by(id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(parent_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
3593
def security_group_count_by_project(context, project_id, session=None):
    """Return the number of non-deleted security groups of a project."""
    authorize_project_context(context, project_id)
    return model_query(context, models.SecurityGroup, read_deleted="no",
                       session=session).\
                   filter_by(project_id=project_id).\
                   count()
3603
def _security_group_rule_get_query(context, session=None):
    """Base query over security group ingress rules."""
    return model_query(context, models.SecurityGroupIngressRule,
                       session=session)
3609
def security_group_rule_get(context, security_group_rule_id, session=None):
    """Return an ingress rule by id or raise SecurityGroupNotFoundForRule."""
    result = _security_group_rule_get_query(context, session=session).\
                    filter_by(id=security_group_rule_id).\
                    first()

    if not result:
        raise exception.SecurityGroupNotFoundForRule(
                rule_id=security_group_rule_id)

    return result
3622
def security_group_rule_get_by_security_group(context, security_group_id,
3624
return _security_group_rule_get_query(context, session=session).\
3625
filter_by(parent_group_id=security_group_id).\
3626
options(joinedload_all('grantee_group.instances.instance_type')).\
3631
def security_group_rule_get_by_security_group_grantee(context,
3635
return _security_group_rule_get_query(context, session=session).\
3636
filter_by(group_id=security_group_id).\
3641
def security_group_rule_create(context, values):
3642
security_group_rule_ref = models.SecurityGroupIngressRule()
3643
security_group_rule_ref.update(values)
3644
security_group_rule_ref.save()
3645
return security_group_rule_ref
3649
def security_group_rule_destroy(context, security_group_rule_id):
3650
session = get_session()
3651
with session.begin():
3652
security_group_rule = security_group_rule_get(context,
3653
security_group_rule_id,
3655
security_group_rule.delete(session=session)
3659
def security_group_rule_count_by_group(context, security_group_id):
3660
return model_query(context, models.SecurityGroupIngressRule,
3661
read_deleted="no").\
3662
filter_by(parent_group_id=security_group_id).\
@require_admin_context
def provider_fw_rule_create(context, rule):
    """Create a provider-level firewall rule from a dict of values."""
    fw_rule_ref = models.ProviderFirewallRule()
    fw_rule_ref.update(rule)
    fw_rule_ref.save()
    return fw_rule_ref


@require_admin_context
def provider_fw_rule_get_all(context):
    """Return all provider-level firewall rules."""
    return model_query(context, models.ProviderFirewallRule).all()


@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
    """Soft-delete a provider-level firewall rule by id."""
    session = get_session()
    with session.begin():
        session.query(models.ProviderFirewallRule).\
                filter_by(id=rule_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
@require_context
def project_get_networks(context, project_id, associate=True):
    """Return the networks associated with a project.

    # NOTE(tr3buchet): as before this function will associate
    # a project with a network if it doesn't have one and
    # associate is true
    """
    result = model_query(context, models.Network, read_deleted="no").\
                     filter_by(project_id=project_id).\
                     all()

    if not result:
        if not associate:
            return []

        return [network_associate(context, project_id)]

    return result
@require_admin_context
def migration_create(context, values):
    """Create a migration record from a dict of column values."""
    migration = models.Migration()
    migration.update(values)
    migration.save()
    return migration


@require_admin_context
def migration_update(context, id, values):
    """Update an existing migration record; raises MigrationNotFound."""
    session = get_session()
    with session.begin():
        migration = migration_get(context, id, session=session)
        migration.update(values)
        migration.save(session=session)
        return migration


@require_admin_context
def migration_get(context, id, session=None):
    """Get a migration by id (including deleted) or raise."""
    result = model_query(context, models.Migration, session=session,
                         read_deleted="yes").\
                     filter_by(id=id).\
                     first()

    if not result:
        raise exception.MigrationNotFound(migration_id=id)

    return result


@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Get the migration for an instance that is in the given status."""
    result = model_query(context, models.Migration, read_deleted="yes").\
                     filter_by(instance_uuid=instance_uuid).\
                     filter_by(status=status).\
                     first()

    if not result:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
                                                  status=status)

    return result


@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
                                              dest_compute, session=None):
    """Return finished migrations on dest_compute that have not been
    confirmed within confirm_window seconds.
    """
    confirm_window = (timeutils.utcnow() -
                      datetime.timedelta(seconds=confirm_window))

    return model_query(context, models.Migration, session=session,
                       read_deleted="yes").\
            filter(models.Migration.updated_at <= confirm_window).\
            filter_by(status="finished").\
            filter_by(dest_compute=dest_compute).\
            all()
def console_pool_create(context, values):
    """Create a console pool from a dict of column values."""
    pool = models.ConsolePool()
    pool.update(values)
    pool.save()
    return pool


def console_pool_get_by_host_type(context, compute_host, host,
                                  console_type):
    """Get the console pool matching (host, console_type, compute_host)."""
    result = model_query(context, models.ConsolePool, read_deleted="no").\
                   filter_by(host=host).\
                   filter_by(console_type=console_type).\
                   filter_by(compute_host=compute_host).\
                   options(joinedload('consoles')).\
                   first()

    if not result:
        raise exception.ConsolePoolNotFoundForHostType(
                host=host, console_type=console_type,
                compute_host=compute_host)

    return result


def console_pool_get_all_by_host_type(context, host, console_type):
    """Return all console pools for a host and console type."""
    return model_query(context, models.ConsolePool, read_deleted="no").\
                   filter_by(host=host).\
                   filter_by(console_type=console_type).\
                   options(joinedload('consoles')).\
                   all()


def console_create(context, values):
    """Create a console record from a dict of column values."""
    console = models.Console()
    console.update(values)
    console.save()
    return console


def console_delete(context, console_id):
    """Hard-delete a console record."""
    session = get_session()
    with session.begin():
        # NOTE(mdragon): consoles are meant to be transient.
        session.query(models.Console).\
                filter_by(id=console_id).\
                delete()


def console_get_by_pool_instance(context, pool_id, instance_uuid):
    """Get the console for an instance within a specific pool."""
    result = model_query(context, models.Console, read_deleted="yes").\
                   filter_by(pool_id=pool_id).\
                   filter_by(instance_uuid=instance_uuid).\
                   options(joinedload('pool')).\
                   first()

    if not result:
        raise exception.ConsoleNotFoundInPoolForInstance(
                pool_id=pool_id, instance_uuid=instance_uuid)

    return result


def console_get_all_by_instance(context, instance_uuid):
    """Return all consoles (including deleted) for an instance."""
    return model_query(context, models.Console, read_deleted="yes").\
                   filter_by(instance_uuid=instance_uuid).\
                   all()


def console_get(context, console_id, instance_uuid=None):
    """Get a console by id, optionally restricted to one instance."""
    query = model_query(context, models.Console, read_deleted="yes").\
                    filter_by(id=console_id).\
                    options(joinedload('pool'))

    if instance_uuid is not None:
        query = query.filter_by(instance_uuid=instance_uuid)

    result = query.first()

    if not result:
        if instance_uuid:
            raise exception.ConsoleNotFoundForInstance(
                    console_id=console_id, instance_uuid=instance_uuid)
        else:
            raise exception.ConsoleNotFound(console_id=console_id)

    return result
3871
@require_admin_context
3872
def instance_type_create(context, values):
3873
"""Create a new instance type. In order to pass in extra specs,
3874
the values dict should contain a 'extra_specs' key/value pair:
3876
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
3879
session = get_session()
3880
with session.begin():
3882
instance_type_get_by_name(context, values['name'], session)
3883
raise exception.InstanceTypeExists(name=values['name'])
3884
except exception.InstanceTypeNotFoundByName:
3887
instance_type_get_by_flavor_id(context, values['flavorid'],
3889
raise exception.InstanceTypeExists(name=values['name'])
3890
except exception.FlavorNotFound:
3893
specs = values.get('extra_specs')
3896
for k, v in specs.iteritems():
3897
specs_ref = models.InstanceTypeExtraSpecs()
3898
specs_ref['key'] = k
3899
specs_ref['value'] = v
3900
specs_refs.append(specs_ref)
3901
values['extra_specs'] = specs_refs
3902
instance_type_ref = models.InstanceTypes()
3903
instance_type_ref.update(values)
3904
instance_type_ref.save(session=session)
3905
except Exception, e:
3906
raise exception.DBError(e)
3907
return _dict_with_extra_specs(instance_type_ref)
3910
def _dict_with_extra_specs(inst_type_query):
3911
"""Takes an instance, volume, or instance type query returned
3912
by sqlalchemy and returns it as a dictionary, converting the
3913
extra_specs entry from a list of dicts:
3915
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
3919
'extra_specs' : {'k1': 'v1'}
3922
inst_type_dict = dict(inst_type_query)
3923
extra_specs = dict([(x['key'], x['value'])
3924
for x in inst_type_query['extra_specs']])
3925
inst_type_dict['extra_specs'] = extra_specs
3926
return inst_type_dict
def _instance_type_get_query(context, session=None, read_deleted=None):
    """Base instance-types query, eagerly loading the extra_specs rows."""
    query = model_query(context, models.InstanceTypes, session=session,
                        read_deleted=read_deleted)
    return query.options(joinedload('extra_specs'))
@require_context
def instance_type_get_all(context, inactive=False, filters=None):
    """
    Returns all instance types.
    """
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for instance-types, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of the
    # database.
    read_deleted = "yes" if inactive else "no"

    query = _instance_type_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        query = query.filter(
                models.InstanceTypes.memory_mb >= filters['min_memory_mb'])

    if 'min_root_gb' in filters:
        query = query.filter(
                models.InstanceTypes.root_gb >= filters['min_root_gb'])

    if 'disabled' in filters:
        query = query.filter(
                models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.InstanceTypes.is_public == filters['is_public']]
        # Public flavors are visible to all; private flavors are visible
        # only to tenants that appear on their access list.
        if filters['is_public'] and context.project_id is not None:
            the_filter.extend([
                models.InstanceTypes.projects.any(
                    project_id=context.project_id, deleted=False)
            ])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
        del filters['is_public']

    inst_types = query.order_by("name").all()

    return [_dict_with_extra_specs(i) for i in inst_types]
@require_context
def instance_type_get(context, id, session=None):
    """Returns a dict describing specific instance_type"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(id=id).\
                    first()

    if not result:
        raise exception.InstanceTypeNotFound(instance_type_id=id)

    return _dict_with_extra_specs(result)


@require_context
def instance_type_get_by_name(context, name, session=None):
    """Returns a dict describing specific instance_type"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(name=name).\
                    first()

    if not result:
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)

    return _dict_with_extra_specs(result)


@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
    """Returns a dict describing specific flavor_id"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(flavorid=flavor_id).\
                    first()

    if not result:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    return _dict_with_extra_specs(result)


@require_admin_context
def instance_type_destroy(context, name):
    """Marks specific instance_type as deleted"""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_name(context, name,
                                                      session=session)
        instance_type_id = instance_type_ref['id']
        session.query(models.InstanceTypes).\
                filter_by(id=instance_type_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        # Soft-delete the associated extra specs in the same transaction.
        session.query(models.InstanceTypeExtraSpecs).\
                filter_by(instance_type_id=instance_type_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})
def _instance_type_access_query(context, session=None):
    """Base query for flavor access records (including deleted rows)."""
    return model_query(context, models.InstanceTypeProjects, session=session,
                       read_deleted="yes")


@require_admin_context
def instance_type_access_get_by_flavor_id(context, flavor_id):
    """Get flavor access list by flavor id"""
    instance_type_ref = _instance_type_get_query(context).\
                    filter_by(flavorid=flavor_id).\
                    first()

    return [r for r in instance_type_ref.projects]


@require_admin_context
def instance_type_access_add(context, flavor_id, project_id):
    """Add given tenant to the flavor access list"""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
                                                           session=session)
        instance_type_id = instance_type_ref['id']
        access_ref = _instance_type_access_query(context, session=session).\
                        filter_by(instance_type_id=instance_type_id).\
                        filter_by(project_id=project_id).first()

        if not access_ref:
            access_ref = models.InstanceTypeProjects()
            access_ref.instance_type_id = instance_type_id
            access_ref.project_id = project_id
            access_ref.save(session=session)
        elif access_ref.deleted:
            # Revive a previously-removed access record instead of
            # creating a duplicate row.
            access_ref.update({'deleted': False,
                               'deleted_at': None})
            access_ref.save(session=session)
        else:
            raise exception.FlavorAccessExists(flavor_id=flavor_id,
                                               project_id=project_id)

        return access_ref


@require_admin_context
def instance_type_access_remove(context, flavor_id, project_id):
    """Remove given tenant from the flavor access list"""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
                                                           session=session)
        instance_type_id = instance_type_ref['id']
        access_ref = _instance_type_access_query(context, session=session).\
                        filter_by(instance_type_id=instance_type_id).\
                        filter_by(project_id=project_id).first()

        if access_ref:
            access_ref.update({'deleted': True,
                               'deleted_at': timeutils.utcnow(),
                               'updated_at': literal_column('updated_at')})
        else:
            raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
                                                 project_id=project_id)
4104
########################
4105
# User-provided metadata
4107
def _instance_metadata_get_query(context, instance_uuid, session=None):
4108
return model_query(context, models.InstanceMetadata, session=session,
4109
read_deleted="no").\
4110
filter_by(instance_uuid=instance_uuid)
4114
def instance_metadata_get(context, instance_uuid, session=None):
4115
rows = _instance_metadata_get_query(context, instance_uuid,
4116
session=session).all()
4120
result[row['key']] = row['value']
4126
def instance_metadata_delete(context, instance_uuid, key):
4127
_instance_metadata_get_query(context, instance_uuid).\
4128
filter_by(key=key).\
4129
update({'deleted': True,
4130
'deleted_at': timeutils.utcnow(),
4131
'updated_at': literal_column('updated_at')})
4135
def instance_metadata_get_item(context, instance_uuid, key, session=None):
4136
result = _instance_metadata_get_query(
4137
context, instance_uuid, session=session).\
4138
filter_by(key=key).\
4142
raise exception.InstanceMetadataNotFound(metadata_key=key,
4143
instance_uuid=instance_uuid)
4149
def instance_metadata_update(context, instance_uuid, metadata, delete,
4152
session = get_session()
4153
# Set existing metadata to deleted if delete argument is True
4155
original_metadata = instance_metadata_get(context, instance_uuid,
4157
for meta_key, meta_value in original_metadata.iteritems():
4158
if meta_key not in metadata:
4159
meta_ref = instance_metadata_get_item(context, instance_uuid,
4161
meta_ref.update({'deleted': True})
4162
meta_ref.save(session=session)
4166
# Now update all existing items with new values, or create new meta objects
4167
for meta_key, meta_value in metadata.iteritems():
4169
# update the value whether it exists or not
4170
item = {"value": meta_value}
4173
meta_ref = instance_metadata_get_item(context, instance_uuid,
4175
except exception.InstanceMetadataNotFound, e:
4176
meta_ref = models.InstanceMetadata()
4177
item.update({"key": meta_key, "instance_uuid": instance_uuid})
4179
meta_ref.update(item)
4180
meta_ref.save(session=session)
4185
#######################
4186
# System-owned metadata
4188
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
4189
return model_query(context, models.InstanceSystemMetadata,
4191
filter_by(instance_uuid=instance_uuid)
4195
def instance_system_metadata_get(context, instance_uuid, session=None):
4196
rows = _instance_system_metadata_get_query(context, instance_uuid,
4197
session=session).all()
4201
result[row['key']] = row['value']
4207
def instance_system_metadata_delete(context, instance_uuid, key):
4208
_instance_system_metadata_get_query(context, instance_uuid).\
4209
filter_by(key=key).\
4210
update({'deleted': True,
4211
'deleted_at': timeutils.utcnow(),
4212
'updated_at': literal_column('updated_at')})
4215
def _instance_system_metadata_get_item(context, instance_uuid, key,
4217
result = _instance_system_metadata_get_query(
4218
context, instance_uuid, session=session).\
4219
filter_by(key=key).\
4223
raise exception.InstanceSystemMetadataNotFound(
4224
metadata_key=key, instance_uuid=instance_uuid)
4230
def instance_system_metadata_update(context, instance_uuid, metadata, delete,
4233
session = get_session()
4235
# Set existing metadata to deleted if delete argument is True
4237
original_metadata = instance_system_metadata_get(
4238
context, instance_uuid, session=session)
4239
for meta_key, meta_value in original_metadata.iteritems():
4240
if meta_key not in metadata:
4241
meta_ref = _instance_system_metadata_get_item(
4242
context, instance_uuid, meta_key, session)
4243
meta_ref.update({'deleted': True})
4244
meta_ref.save(session=session)
4248
# Now update all existing items with new values, or create new meta objects
4249
for meta_key, meta_value in metadata.iteritems():
4251
# update the value whether it exists or not
4252
item = {"value": meta_value}
4255
meta_ref = _instance_system_metadata_get_item(
4256
context, instance_uuid, meta_key, session)
4257
except exception.InstanceSystemMetadataNotFound, e:
4258
meta_ref = models.InstanceSystemMetadata()
4259
item.update({"key": meta_key, "instance_uuid": instance_uuid})
4261
meta_ref.update(item)
4262
meta_ref.save(session=session)
####################


@require_admin_context
def agent_build_create(context, values):
    """Create a guest-agent build record from a dict of values."""
    agent_build_ref = models.AgentBuild()
    agent_build_ref.update(values)
    agent_build_ref.save()
    return agent_build_ref


@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture,
                              session=None):
    """Get the agent build matching (hypervisor, os, architecture)."""
    return model_query(context, models.AgentBuild, session=session,
                       read_deleted="no").\
                   filter_by(hypervisor=hypervisor).\
                   filter_by(os=os).\
                   filter_by(architecture=architecture).\
                   first()


@require_admin_context
def agent_build_get_all(context):
    """Return all non-deleted agent builds."""
    return model_query(context, models.AgentBuild, read_deleted="no").\
                   all()


@require_admin_context
def agent_build_destroy(context, agent_build_id):
    """Soft-delete an agent build by id."""
    session = get_session()
    with session.begin():
        model_query(context, models.AgentBuild, session=session,
                    read_deleted="yes").\
                filter_by(id=agent_build_id).\
                update({'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_admin_context
def agent_build_update(context, agent_build_id, values):
    """Update an agent build record with new column values."""
    session = get_session()
    with session.begin():
        agent_build_ref = model_query(context, models.AgentBuild,
                                      session=session, read_deleted="yes").\
                   filter_by(id=agent_build_id).\
                   first()

        agent_build_ref.update(values)
        agent_build_ref.save(session=session)
####################


def bw_usage_get_by_uuids(context, uuids, start_period):
    """Return bandwidth usage rows for the given uuids in one period."""
    return model_query(context, models.BandwidthUsage, read_deleted="yes").\
                   filter(models.BandwidthUsage.uuid.in_(uuids)).\
                   filter_by(start_period=start_period).\
                   all()


@require_context
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
                    last_refreshed=None, session=None):
    """Update (or create) the bandwidth usage record for (uuid, mac, period).
    """
    if not session:
        session = get_session()

    if last_refreshed is None:
        last_refreshed = timeutils.utcnow()

    # NOTE(comstud): More often than not, we'll be updating records vs
    # creating records.  Optimize accordingly, trying to update existing
    # records.  Fall back to creation when no rows are updated.
    with session.begin():
        values = {'last_refreshed': last_refreshed,
                  'bw_in': bw_in,
                  'bw_out': bw_out}
        rows = model_query(context, models.BandwidthUsage,
                           session=session, read_deleted="yes").\
                       filter_by(start_period=start_period).\
                       filter_by(uuid=uuid).\
                       filter_by(mac=mac).\
                       update(values, synchronize_session=False)
        if rows:
            return

        bwusage = models.BandwidthUsage()
        bwusage.start_period = start_period
        bwusage.uuid = uuid
        bwusage.mac = mac
        bwusage.last_refreshed = last_refreshed
        bwusage.bw_in = bw_in
        bwusage.bw_out = bw_out
        bwusage.save(session=session)
4365
####################
4368
def _instance_type_extra_specs_get_query(context, flavor_id,
4370
# Two queries necessary because join with update doesn't work.
4371
t = model_query(context, models.InstanceTypes.id,
4372
session=session, read_deleted="no").\
4373
filter(models.InstanceTypes.flavorid == flavor_id).\
4375
return model_query(context, models.InstanceTypeExtraSpecs,
4376
session=session, read_deleted="no").\
4377
filter(models.InstanceTypeExtraSpecs.\
4378
instance_type_id.in_(t))
4382
def instance_type_extra_specs_get(context, flavor_id):
4383
rows = _instance_type_extra_specs_get_query(
4384
context, flavor_id).\
4389
result[row['key']] = row['value']
4395
def instance_type_extra_specs_delete(context, flavor_id, key):
4396
# Don't need synchronize the session since we will not use the query result
4397
_instance_type_extra_specs_get_query(
4398
context, flavor_id).\
4399
filter(models.InstanceTypeExtraSpecs.key == key).\
4400
update({'deleted': True,
4401
'deleted_at': timeutils.utcnow(),
4402
'updated_at': literal_column('updated_at')},
4403
synchronize_session=False)
4407
def instance_type_extra_specs_get_item(context, flavor_id, key,
4409
result = _instance_type_extra_specs_get_query(
4410
context, flavor_id, session=session).\
4411
filter(models.InstanceTypeExtraSpecs.key == key).\
4414
raise exception.InstanceTypeExtraSpecsNotFound(
4415
extra_specs_key=key, instance_type_id=flavor_id)
4421
def instance_type_extra_specs_update_or_create(context, flavor_id,
4423
session = get_session()
4425
instance_type = instance_type_get_by_flavor_id(context, flavor_id)
4426
for key, value in specs.iteritems():
4428
spec_ref = instance_type_extra_specs_get_item(
4429
context, flavor_id, key, session)
4430
except exception.InstanceTypeExtraSpecsNotFound, e:
4431
spec_ref = models.InstanceTypeExtraSpecs()
4432
spec_ref.update({"key": key, "value": value,
4433
"instance_type_id": instance_type["id"],
4435
spec_ref.save(session=session)
4442
@require_admin_context
4443
def volume_type_create(context, values):
4444
"""Create a new instance type. In order to pass in extra specs,
4445
the values dict should contain a 'extra_specs' key/value pair:
4447
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
4450
session = get_session()
4451
with session.begin():
4453
volume_type_get_by_name(context, values['name'], session)
4454
raise exception.VolumeTypeExists(name=values['name'])
4455
except exception.VolumeTypeNotFoundByName:
4458
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
4459
models.VolumeTypeExtraSpecs)
4460
volume_type_ref = models.VolumeTypes()
4461
volume_type_ref.update(values)
4462
volume_type_ref.save()
4463
except Exception, e:
4464
raise exception.DBError(e)
4465
return volume_type_ref
4469
def volume_type_get_all(context, inactive=False, filters=None):
4471
Returns a dict describing all volume_types with name as key.
4473
filters = filters or {}
4475
read_deleted = "yes" if inactive else "no"
4476
rows = model_query(context, models.VolumeTypes,
4477
read_deleted=read_deleted).\
4478
options(joinedload('extra_specs')).\
4482
# TODO(sirp): this patern of converting rows to a result with extra_specs
4483
# is repeated quite a bit, might be worth creating a method for it
4486
result[row['name']] = _dict_with_extra_specs(row)
4492
def volume_type_get(context, id, session=None):
4493
"""Returns a dict describing specific volume_type"""
4494
result = model_query(context, models.VolumeTypes, session=session).\
4495
options(joinedload('extra_specs')).\
4500
raise exception.VolumeTypeNotFound(volume_type_id=id)
4502
return _dict_with_extra_specs(result)
4506
def volume_type_get_by_name(context, name, session=None):
4507
"""Returns a dict describing specific volume_type"""
4508
result = model_query(context, models.VolumeTypes, session=session).\
4509
options(joinedload('extra_specs')).\
4510
filter_by(name=name).\
4514
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
4516
return _dict_with_extra_specs(result)
4519
@require_admin_context
4520
def volume_type_destroy(context, name):
4521
session = get_session()
4522
with session.begin():
4523
volume_type_ref = volume_type_get_by_name(context, name,
4525
volume_type_id = volume_type_ref['id']
4526
session.query(models.VolumeTypes).\
4527
filter_by(id=volume_type_id).\
4528
update({'deleted': True,
4529
'deleted_at': timeutils.utcnow(),
4530
'updated_at': literal_column('updated_at')})
4531
session.query(models.VolumeTypeExtraSpecs).\
4532
filter_by(volume_type_id=volume_type_id).\
4533
update({'deleted': True,
4534
'deleted_at': timeutils.utcnow(),
4535
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_get_active_by_window(context, begin, end=None,
                                project_id=None):
    """Return volumes that were active during window."""
    session = get_session()
    query = session.query(models.Volume)

    # A volume was active during the window if it was not deleted
    # before the window started...
    query = query.filter(or_(models.Volume.deleted_at == None,
                             models.Volume.deleted_at > begin))
    # ...and (when an end is given) it was created before the window ended.
    if end:
        query = query.filter(models.Volume.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()
4555
####################
4558
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
4559
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
4560
read_deleted="no").\
4561
filter_by(volume_type_id=volume_type_id)
4565
def volume_type_extra_specs_get(context, volume_type_id):
4566
rows = _volume_type_extra_specs_query(context, volume_type_id).\
4571
result[row['key']] = row['value']
4577
def volume_type_extra_specs_delete(context, volume_type_id, key):
4578
_volume_type_extra_specs_query(context, volume_type_id).\
4579
filter_by(key=key).\
4580
update({'deleted': True,
4581
'deleted_at': timeutils.utcnow(),
4582
'updated_at': literal_column('updated_at')})
4586
def volume_type_extra_specs_get_item(context, volume_type_id, key,
4588
result = _volume_type_extra_specs_query(
4589
context, volume_type_id, session=session).\
4590
filter_by(key=key).\
4594
raise exception.VolumeTypeExtraSpecsNotFound(
4595
extra_specs_key=key, volume_type_id=volume_type_id)
4601
def volume_type_extra_specs_update_or_create(context, volume_type_id,
4603
session = get_session()
4605
for key, value in specs.iteritems():
4607
spec_ref = volume_type_extra_specs_get_item(
4608
context, volume_type_id, key, session)
4609
except exception.VolumeTypeExtraSpecsNotFound, e:
4610
spec_ref = models.VolumeTypeExtraSpecs()
4611
spec_ref.update({"key": key, "value": value,
4612
"volume_type_id": volume_type_id,
4614
spec_ref.save(session=session)
4618
####################
4621
def s3_image_get(context, image_id):
4622
"""Find local s3 image represented by the provided id"""
4623
result = model_query(context, models.S3Image, read_deleted="yes").\
4624
filter_by(id=image_id).\
4628
raise exception.ImageNotFound(image_id=image_id)
4633
def s3_image_get_by_uuid(context, image_uuid):
4634
"""Find local s3 image represented by the provided uuid"""
4635
result = model_query(context, models.S3Image, read_deleted="yes").\
4636
filter_by(uuid=image_uuid).\
4640
raise exception.ImageNotFound(image_id=image_uuid)
4645
def s3_image_create(context, image_uuid):
4646
"""Create local s3 image represented by provided uuid"""
4648
s3_image_ref = models.S3Image()
4649
s3_image_ref.update({'uuid': image_uuid})
4651
except Exception, e:
4652
raise exception.DBError(e)
####################


@require_admin_context
def sm_backend_conf_create(context, values):
    """Create a storage-manager backend config, rejecting duplicates."""
    session = get_session()
    with session.begin():
        config_params = values['config_params']
        backend_conf = model_query(context, models.SMBackendConf,
                                   session=session,
                                   read_deleted="yes").\
                                   filter_by(config_params=config_params).\
                                   first()

        if backend_conf:
            raise exception.Duplicate(_('Backend exists'))
        else:
            backend_conf = models.SMBackendConf()
            backend_conf.update(values)
            backend_conf.save(session=session)
    return backend_conf


@require_admin_context
def sm_backend_conf_update(context, sm_backend_id, values):
    """Update a backend config by id; raises NotFound if missing."""
    session = get_session()
    with session.begin():
        backend_conf = model_query(context, models.SMBackendConf,
                                   session=session,
                                   read_deleted="yes").\
                                   filter_by(id=sm_backend_id).\
                                   first()

        if not backend_conf:
            raise exception.NotFound(
                _("No backend config with id %(sm_backend_id)s") % locals())

        backend_conf.update(values)
        backend_conf.save(session=session)
    return backend_conf


@require_admin_context
def sm_backend_conf_delete(context, sm_backend_id):
    """Hard-delete a backend config by id."""
    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
    # `purge` actually deleting the record?
    session = get_session()
    with session.begin():
        model_query(context, models.SMBackendConf, session=session,
                    read_deleted="yes").\
                filter_by(id=sm_backend_id).\
                delete()


@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
    """Get a backend config by id or raise NotFound."""
    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
                     filter_by(id=sm_backend_id).\
                     first()

    if not result:
        raise exception.NotFound(_("No backend config with id "
                                   "%(sm_backend_id)s") % locals())

    return result


@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
    """Get the backend config for a storage repository uuid."""
    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
                     filter_by(sr_uuid=sr_uuid).\
                     first()

    if not result:
        raise exception.NotFound(_("No backend config with sr uuid "
                                   "%(sr_uuid)s") % locals())

    return result


@require_admin_context
def sm_backend_conf_get_all(context):
    """Return all backend configs, including deleted ones."""
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
                    all()
####################


def _sm_flavor_get_query(context, sm_flavor_id, session=None):
    """Base query for a storage-manager flavor by id."""
    return model_query(context, models.SMFlavors, session=session,
                       read_deleted="yes").\
                        filter_by(id=sm_flavor_id)


@require_admin_context
def sm_flavor_create(context, values):
    """Create an SM flavor, rejecting duplicate labels."""
    session = get_session()
    with session.begin():
        sm_flavor = model_query(context, models.SMFlavors,
                                session=session,
                                read_deleted="yes").\
                                filter_by(label=values['label']).\
                                first()
        if not sm_flavor:
            sm_flavor = models.SMFlavors()
            sm_flavor.update(values)
            sm_flavor.save(session=session)
        else:
            raise exception.Duplicate(_('Flavor exists'))
    return sm_flavor


@require_admin_context
def sm_flavor_update(context, sm_flavor_id, values):
    """Update an SM flavor by id; raises NotFound if missing."""
    session = get_session()
    with session.begin():
        sm_flavor = model_query(context, models.SMFlavors,
                                session=session,
                                read_deleted="yes").\
                                filter_by(id=sm_flavor_id).\
                                first()
        if not sm_flavor:
            # BUG FIX: the original format string '%(sm_flavor_id) flavor
            # not found' lacked the 's' conversion, so '% locals()' would
            # misformat (or raise) instead of interpolating the id.
            raise exception.NotFound(
                    _('%(sm_flavor_id)s flavor not found') % locals())
        sm_flavor.update(values)
        sm_flavor.save(session=session)
    return sm_flavor


@require_admin_context
def sm_flavor_delete(context, sm_flavor_id):
    """Hard-delete an SM flavor by id."""
    session = get_session()
    with session.begin():
        _sm_flavor_get_query(context, sm_flavor_id).delete()


@require_admin_context
def sm_flavor_get(context, sm_flavor_id):
    """Get an SM flavor by id or raise NotFound."""
    result = _sm_flavor_get_query(context, sm_flavor_id).first()

    if not result:
        raise exception.NotFound(
                _("No sm_flavor called %(sm_flavor_id)s") % locals())

    return result


@require_admin_context
def sm_flavor_get_all(context):
    """Return all SM flavors, including deleted ones."""
    return model_query(context, models.SMFlavors, read_deleted="yes").all()


@require_admin_context
def sm_flavor_get_by_label(context, sm_flavor_label):
    """Get an SM flavor by label or raise NotFound."""
    result = model_query(context, models.SMFlavors,
                         read_deleted="yes").\
                        filter_by(label=sm_flavor_label).first()

    if not result:
        raise exception.NotFound(
                _("No sm_flavor called %(sm_flavor_label)s") % locals())

    return result
###############################


def _sm_volume_get_query(context, volume_id, session=None):
    """Base query for a storage-manager volume by id."""
    return model_query(context, models.SMVolume, session=session,
                       read_deleted="yes").\
                        filter_by(id=volume_id)


def sm_volume_create(context, values):
    """Create an SM volume from a dict of column values."""
    sm_volume = models.SMVolume()
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_update(context, volume_id, values):
    """Update an SM volume; raises NotFound via sm_volume_get."""
    sm_volume = sm_volume_get(context, volume_id)
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_delete(context, volume_id):
    """Hard-delete an SM volume by id."""
    session = get_session()
    with session.begin():
        _sm_volume_get_query(context, volume_id, session=session).delete()


def sm_volume_get(context, volume_id):
    """Get an SM volume by id or raise NotFound."""
    result = _sm_volume_get_query(context, volume_id).first()

    if not result:
        raise exception.NotFound(
                _("No sm_volume with id %(volume_id)s") % locals())

    return result


def sm_volume_get_all(context):
    """Return all SM volumes, including deleted ones."""
    return model_query(context, models.SMVolume, read_deleted="yes").all()
4865
def _aggregate_get_query(context, model_class, id_field, id,
                         session=None, read_deleted=None):
    """Build a query for `model_class` rows where `id_field` equals `id`."""
    return model_query(context, model_class, session=session,
                       read_deleted=read_deleted).filter(id_field == id)
@require_admin_context
def aggregate_create(context, values, metadata=None):
    """Create a new aggregate, optionally attaching `metadata`.

    :raises: exception.AggregateNameExists if a live aggregate already
             uses values['name'].
    """
    session = get_session()
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.name,
                                     values['name'],
                                     session=session,
                                     read_deleted='no').first()
    if not aggregate:
        aggregate = models.Aggregate()
        aggregate.update(values)
        aggregate.save(session=session)
    else:
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
    return aggregate
@require_admin_context
def aggregate_get(context, aggregate_id):
    """Return the aggregate with the given id.

    :raises: exception.AggregateNotFound if no aggregate matches.
    """
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id,
                                     aggregate_id).first()

    if not aggregate:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)

    return aggregate
@require_admin_context
def aggregate_get_by_host(context, host, key=None):
    """Return all aggregates that include `host`.

    If `key` is given, only aggregates carrying that metadata key
    are returned.
    """
    query = model_query(context, models.Aggregate).join(
            "_hosts").filter(models.AggregateHost.host == host)

    if key:
        query = query.join("_metadata").filter(
            models.AggregateMetadata.key == key)
    return query.all()
@require_admin_context
def aggregate_metadata_get_by_host(context, host, key=None):
    """Return metadata of all aggregates that include `host`.

    :returns: dict mapping each metadata key to the set of values seen
              across the matching aggregates; restricted to `key` when given.
    """
    query = model_query(context, models.Aggregate).join(
            "_hosts").filter(models.AggregateHost.host == host).join(
            "_metadata")

    if key:
        query = query.filter(models.AggregateMetadata.key == key)
    rows = query.all()
    metadata = collections.defaultdict(set)
    for agg in rows:
        for kv in agg._metadata:
            metadata[kv['key']].add(kv['value'])
    return metadata
@require_admin_context
def aggregate_update(context, aggregate_id, values):
    """Update an aggregate; values may contain a 'metadata' dict.

    :raises: exception.AggregateNotFound if no aggregate matches.
    """
    session = get_session()
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id,
                                     aggregate_id,
                                     session=session).first()
    if aggregate:
        metadata = values.get('metadata')
        if metadata is not None:
            # Replace existing metadata wholesale (set_delete drops keys
            # absent from the new dict).
            aggregate_metadata_add(context,
                                   aggregate_id,
                                   values.pop('metadata'),
                                   set_delete=True)
        with session.begin():
            aggregate.update(values)
            aggregate.save(session=session)
            # Restore the caller's dict so `values` is not mutated on return.
            values['metadata'] = metadata
        return aggregate
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@require_admin_context
def aggregate_delete(context, aggregate_id):
    """Soft-delete an aggregate and all of its metadata rows.

    :raises: exception.AggregateNotFound if no aggregate matches.
    """
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id,
                                 aggregate_id)
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)

    # Soft-delete the aggregate's metadata as well.
    rows = model_query(context,
                       models.AggregateMetadata).\
                       filter_by(aggregate_id=aggregate_id).\
                       update({'deleted': True,
                               'deleted_at': timeutils.utcnow(),
                               'updated_at': literal_column('updated_at')})
@require_admin_context
def aggregate_get_all(context):
    """Return all (non-deleted) aggregates."""
    return model_query(context, models.Aggregate).all()
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
    """Return the aggregate's metadata as a plain {key: value} dict."""
    rows = model_query(context,
                       models.AggregateMetadata).\
                       filter_by(aggregate_id=aggregate_id).all()

    return dict([(r['key'], r['value']) for r in rows])
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
    """Soft-delete one metadata entry of an aggregate.

    :raises: exception.AggregateMetadataNotFound if `key` is not present.
    """
    query = _aggregate_get_query(context,
                                 models.AggregateMetadata,
                                 models.AggregateMetadata.aggregate_id,
                                 aggregate_id).\
                                 filter_by(key=key)
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
                                                  metadata_key=key)
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get_item(context, aggregate_id, key, session=None):
    """Return one metadata row of an aggregate (deleted rows included).

    :raises: exception.AggregateMetadataNotFound if `key` is not present.
    """
    result = _aggregate_get_query(context,
                                  models.AggregateMetadata,
                                  models.AggregateMetadata.aggregate_id,
                                  aggregate_id, session=session,
                                  read_deleted='yes').\
                                  filter_by(key=key).first()

    if not result:
        raise exception.AggregateMetadataNotFound(metadata_key=key,
                                                  aggregate_id=aggregate_id)

    return result
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add or update metadata entries on an aggregate.

    With set_delete=True, keys that are absent from `metadata` are
    soft-deleted so the stored metadata exactly mirrors the given dict.
    """
    session = get_session()

    if set_delete:
        original_metadata = aggregate_metadata_get(context, aggregate_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                       meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    for meta_key, meta_value in metadata.iteritems():
        item = {"value": meta_value}
        try:
            # Reuse (and possibly undelete) an existing row for this key.
            meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                   meta_key, session)
            if meta_ref.deleted:
                item.update({'deleted': False, 'deleted_at': None})
        except exception.AggregateMetadataNotFound:
            meta_ref = models.AggregateMetadata()
            item.update({"key": meta_key, "aggregate_id": aggregate_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
@require_admin_context
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
    """Return the host names belonging to an aggregate."""
    rows = model_query(context,
                       models.AggregateHost).\
                       filter_by(aggregate_id=aggregate_id).all()

    return [r.host for r in rows]
@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
    """Soft-delete a host's membership in an aggregate.

    :raises: exception.AggregateHostNotFound if `host` is not a member.
    """
    query = _aggregate_get_query(context,
                                 models.AggregateHost,
                                 models.AggregateHost.aggregate_id,
                                 aggregate_id).filter_by(host=host)
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
                                              host=host)
@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
    """Add `host` to an aggregate, undeleting a prior membership if any.

    :raises: exception.AggregateHostExists if the host is already a
             live member of the aggregate.
    """
    session = get_session()
    host_ref = _aggregate_get_query(context,
                                    models.AggregateHost,
                                    models.AggregateHost.aggregate_id,
                                    aggregate_id,
                                    session=session,
                                    read_deleted='yes').\
                                    filter_by(host=host).first()
    if not host_ref:
        host_ref = models.AggregateHost()
        values = {"host": host, "aggregate_id": aggregate_id, }
        host_ref.update(values)
        host_ref.save(session=session)
    elif host_ref.deleted:
        # Revive the previously deleted membership instead of duplicating it.
        host_ref.update({'deleted': False, 'deleted_at': None})
        host_ref.save(session=session)
    else:
        raise exception.AggregateHostExists(host=host,
                                            aggregate_id=aggregate_id)
    return host_ref
def instance_fault_create(context, values):
    """Create a new InstanceFault."""
    fault_ref = models.InstanceFault()
    fault_ref.update(values)
    # Persist the row; without save() the fault would never hit the DB.
    fault_ref.save()
    return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
    """Get all instance faults for the provided instance_uuids.

    :returns: dict mapping each requested uuid to a (possibly empty) list
              of fault dicts, newest first.
    """
    rows = model_query(context, models.InstanceFault, read_deleted='no').\
                       filter(models.InstanceFault.instance_uuid.in_(
                           instance_uuids)).\
                       order_by(desc("created_at")).\
                       all()

    # Seed every requested uuid so callers get an entry even with no faults.
    output = {}
    for instance_uuid in instance_uuids:
        output[instance_uuid] = []

    for row in rows:
        data = dict(row.iteritems())
        output[row['instance_uuid']].append(data)

    return output
def ec2_instance_create(context, instance_uuid, id=None):
    """Create an ec2-compatible instance id mapping for the given uuid.

    If `id` is given it is used as the integer ec2 id; otherwise the DB
    assigns one.
    """
    ec2_instance_ref = models.InstanceIdMapping()
    ec2_instance_ref.update({'uuid': instance_uuid})
    if id is not None:
        ec2_instance_ref.update({'id': id})

    ec2_instance_ref.save()

    return ec2_instance_ref
def get_ec2_instance_id_by_uuid(context, instance_id, session=None):
    """Return the integer ec2 id mapped to the given instance uuid.

    :raises: exception.InstanceNotFound if no mapping exists.
    """
    result = _ec2_instance_get_query(context,
                                     session=session).\
                     filter_by(uuid=instance_id).\
                     first()

    if not result:
        raise exception.InstanceNotFound(instance_id=instance_id)

    return result['id']
def get_instance_uuid_by_ec2_id(context, ec2_id, session=None):
    """Return the instance uuid mapped to the given integer ec2 id.

    :raises: exception.InstanceNotFound if no mapping exists.
    """
    result = _ec2_instance_get_query(context,
                                     session=session).\
                     filter_by(id=ec2_id).\
                     first()

    if not result:
        raise exception.InstanceNotFound(instance_id=ec2_id)

    return result['uuid']
def _ec2_instance_get_query(context, session=None):
    """Build a query over the ec2 instance-id mapping table.

    NOTE(review): the dropped kwargs are reconstructed as
    session=/read_deleted= — confirm against upstream history.
    """
    return model_query(context,
                       models.InstanceIdMapping,
                       session=session,
                       read_deleted='yes')
@require_admin_context
def task_log_get(context, task_name, period_beginning,
                 period_ending, host, state=None, session=None):
    """Return the task log entry matching name/period/host (and state,
    when given), or None if there is no match."""
    query = model_query(context, models.TaskLog, session=session).\
                        filter_by(task_name=task_name).\
                        filter_by(period_beginning=period_beginning).\
                        filter_by(period_ending=period_ending).\
                        filter_by(host=host)
    if state is not None:
        query = query.filter_by(state=state)

    return query.first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning,
                     period_ending, host=None, state=None, session=None):
    """Return all task log entries matching name/period, optionally
    narrowed by host and state."""
    query = model_query(context, models.TaskLog, session=session).\
                        filter_by(task_name=task_name).\
                        filter_by(period_beginning=period_beginning).\
                        filter_by(period_ending=period_ending)
    if host is not None:
        query = query.filter_by(host=host)
    if state is not None:
        query = query.filter_by(state=state)

    return query.all()
@require_admin_context
def task_log_begin_task(context, task_name,
                        period_beginning,
                        period_ending,
                        host,
                        task_items=None,
                        message=None,
                        session=None):
    """Record the start of a periodic task as a RUNNING task log entry.

    :raises: exception.TaskAlreadyRunning if an entry already exists for
             this task/period/host.

    NOTE(review): the dropped signature lines are reconstructed —
    confirm parameter order against upstream history.
    """
    session = session or get_session()
    with session.begin():
        task = task_log_get(context, task_name,
                            period_beginning,
                            period_ending,
                            host,
                            session=session)
        if task:
            #It's already run(ning)!
            raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
        task = models.TaskLog()
        task.task_name = task_name
        task.period_beginning = period_beginning
        task.period_ending = period_ending
        task.host = host
        task.state = "RUNNING"
        if message:
            task.message = message
        if task_items:
            task.task_items = task_items
        task.save(session=session)
@require_admin_context
def task_log_end_task(context, task_name,
                      period_beginning,
                      period_ending,
                      host,
                      errors,
                      message=None,
                      session=None):
    """Mark a RUNNING task log entry as DONE, recording errors/message.

    :raises: exception.TaskNotRunning if no entry exists for this
             task/period/host.

    NOTE(review): the dropped signature lines and the DONE-state
    assignment are reconstructed — confirm against upstream history.
    """
    session = session or get_session()
    with session.begin():
        task = task_log_get(context, task_name,
                            period_beginning,
                            period_ending,
                            host,
                            session=session)
        if not task:
            #It's not running!
            raise exception.TaskNotRunning(task_name=task_name, host=host)
        task.state = "DONE"
        task.errors = errors
        task.message = message
        task.save(session=session)