On 05/16/2013 05:22 PM, Nico Golde wrote:
> Package: keystone
> Severity: grave
> Tags: security patch
>
> Hi,
> the following vulnerability was published for keystone.
>
> CVE-2013-2014[0]:
> | Concurrent requests with large POST body can crash the keystone process.
> | This can be used by Malicious and lead to DOS to Cloud Service Provider.
>
> If you fix the vulnerability please also make sure to include the
> CVE (Common Vulnerabilities & Exposures) id in your changelog entry.
>
> Upstream patch: https://review.openstack.org/#/c/22661/
>
> Seems to be fixed for experimental in 2013.1-1.
>
> For further information see:
>
> [0] http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-2014
>     http://security-tracker.debian.org/tracker/CVE-2013-2014
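To make the quoted report concrete: an unpatched keystone buffers the incoming request body without any upper bound, so any client that can reach the API can exhaust the process's memory. A minimal illustration of the request class involved (the endpoint, port, and body size below are placeholders for a local test instance, not taken from the report):

```python
# Illustration of the request class described in the report; endpoint,
# port, and body size are placeholders for a local test instance.
import requests

body = 'x' * (500 * 1024 * 1024)  # one oversized POST body (~500 MB)
# Several of these issued concurrently could crash an unpatched keystone.
requests.post('http://127.0.0.1:5000/v2.0/tokens', data=body)
```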
FYI, the patch for Grizzly is here: https://review.openstack.org/#/c/19567/

I don't think it will be trivial to backport, though. I have attached the
corresponding git commit from the Grizzly (i.e. 2013.1.x) branch.

Indeed, 2013.1.1 isn't vulnerable (I could see the patch in the git log). I
uploaded that version to Sid earlier today (that upload was unrelated to this
issue; I just happened to be working on it).

Cheers,

Thomas
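For anyone reviewing the attachment below, the heart of the fix is small: a new paste filter (`sizelimit`) is prepended to every pipeline; it rejects requests whose declared Content-Length exceeds a configurable cap (112 KiB by default) and, for bodies with no declared length, wraps the input stream in a reader that raises once the cap is crossed. A stripped-down, standalone sketch of that technique (simplified names; this is not the upstream code itself):

```python
# Standalone sketch of the body-size-limit technique used by the attached
# patch; names are simplified and this is not the upstream code itself.
MAX_BODY = 114688  # keystone's default max_request_body_size (112 KiB)


class RequestTooLarge(Exception):
    """Rendered as HTTP 413 by the real middleware."""


class LimitingReader(object):
    """Wrap a file-like body; fail once more than `limit` bytes are read."""

    def __init__(self, data, limit):
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def read(self, size=None):
        result = self.data.read() if size is None else self.data.read(size)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            raise RequestTooLarge()
        return result


def limit_request_body(environ):
    """Reject oversized requests up front; cap undeclared ones lazily."""
    length = environ.get('CONTENT_LENGTH')
    if length and int(length) > MAX_BODY:
        raise RequestTooLarge()
    # No declared length (e.g. chunked transfer): enforce while reading.
    environ['wsgi.input'] = LimitingReader(environ['wsgi.input'], MAX_BODY)
```

The attached commit wires the same idea into keystone's wsgi.Middleware machinery and adds the `max_request_body_size` option plus tests.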
diff --git a/etc/keystone.conf.sample b/etc/keystone.conf.sample
index 13a7847..4017a04 100644
--- a/etc/keystone.conf.sample
+++ b/etc/keystone.conf.sample
@@ -186,6 +186,9 @@ paste.filter_factory = keystone.contrib.s3:S3Extension.factory
 [filter:url_normalize]
 paste.filter_factory = keystone.middleware:NormalizingFilter.factory
 
+[filter:sizelimit]
+paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
+
 [filter:stats_monitoring]
 paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
 
@@ -202,13 +205,13 @@ paste.app_factory = keystone.service:v3_app_factory
 paste.app_factory = keystone.service:admin_app_factory
 
 [pipeline:public_api]
-pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
+pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
 
 [pipeline:admin_api]
-pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
+pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
 
 [pipeline:api_v3]
-pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
+pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
 
 [app:public_version_service]
 paste.app_factory = keystone.service:public_version_app_factory
@@ -217,10 +220,10 @@ paste.app_factory = keystone.service:public_version_app_factory
 paste.app_factory = keystone.service:admin_version_app_factory
 
 [pipeline:public_version_api]
-pipeline = stats_monitoring url_normalize xml_body public_version_service
+pipeline = sizelimit stats_monitoring url_normalize xml_body public_version_service
 
 [pipeline:admin_version_api]
-pipeline = stats_monitoring url_normalize xml_body admin_version_service
+pipeline = sizelimit stats_monitoring url_normalize xml_body admin_version_service
 
 [composite:main]
 use = egg:Paste#urlmap
diff --git a/keystone/common/ldap/fakeldap.py b/keystone/common/ldap/fakeldap.py
index b41f20d..6e962f4 100644
--- a/keystone/common/ldap/fakeldap.py
+++ b/keystone/common/ldap/fakeldap.py
@@ -321,10 +321,7 @@ class FakeLdap(object):
         objects = []
         for dn, attrs in results:
             # filter the objects by query
-            id_attr, id_val = dn.partition(',')[0].split('=', 1)
-            match_attrs = attrs.copy()
-            match_attrs[id_attr] = [id_val]
-            if not query or _match_query(query, match_attrs):
+            if not query or _match_query(query, attrs):
                 # filter the attributes by fields
                 attrs = dict([(k, v) for k, v in attrs.iteritems()
                               if not fields or k in fields])
diff --git a/keystone/common/sql/migrate_repo/versions/008_normalize_identity.py b/keystone/common/sql/migrate_repo/versions/008_normalize_identity.py
index d08ad7c..3f5ae3b 100644
--- a/keystone/common/sql/migrate_repo/versions/008_normalize_identity.py
+++ b/keystone/common/sql/migrate_repo/versions/008_normalize_identity.py
@@ -16,19 +16,54 @@
 
 from sqlalchemy import Column, MetaData, String, Table, Text, types
+from sqlalchemy.orm import sessionmaker
 
-#this won't work on sqlite. It doesn't support dropping columns
+#sqlite doesn't support dropping columns. Copy to a new table instead
 
 
 def downgrade_user_table(meta, migrate_engine):
-    user_table = Table('user', meta, autoload=True)
-    user_table.columns["password"].drop()
-    user_table.columns["enabled"].drop()
+    maker = sessionmaker(bind=migrate_engine)
+    session = maker()
+    session.execute("ALTER TABLE user RENAME TO orig_user;")
+
+    user_table = Table(
+        'user',
+        meta,
+        Column('id', String(64), primary_key=True),
+        Column('name', String(64), unique=True, nullable=False),
+        Column('extra', Text()))
+    user_table.create(migrate_engine, checkfirst=True)
+
+    orig_user_table = Table('orig_user', meta, autoload=True)
+    for user in session.query(orig_user_table):
+        session.execute("insert into user (id, name, extra) "
+                        "values ( :id, :name, :extra);",
+                        {'id': user.id,
+                         'name': user.name,
+                         'extra': user.extra})
+    session.execute("drop table orig_user;")
 
 
 def downgrade_tenant_table(meta, migrate_engine):
-    tenant_table = Table('tenant', meta, autoload=True)
-    tenant_table.columns["description"].drop()
-    tenant_table.columns["enabled"].drop()
+    maker = sessionmaker(bind=migrate_engine)
+    session = maker()
+    session.execute("ALTER TABLE tenant RENAME TO orig_tenant;")
+
+    tenant_table = Table(
+        'tenant',
+        meta,
+        Column('id', String(64), primary_key=True),
+        Column('name', String(64), unique=True, nullable=False),
+        Column('extra', Text()))
+    tenant_table.create(migrate_engine, checkfirst=True)
+
+    orig_tenant_table = Table('orig_tenant', meta, autoload=True)
+    for tenant in session.query(orig_tenant_table):
+        session.execute("insert into tenant (id, name, extra) "
+                        "values ( :id, :name, :extra);",
+                        {'id': tenant.id,
+                         'name': tenant.name,
+                         'extra': tenant.extra})
+    session.execute("drop table orig_tenant;")
 
 
 def upgrade_user_table(meta, migrate_engine):
diff --git a/keystone/common/sql/migrate_repo/versions/008_sqlite_downgrade.sql b/keystone/common/sql/migrate_repo/versions/008_sqlite_downgrade.sql
deleted file mode 100644
index 4dec683..0000000
--- a/keystone/common/sql/migrate_repo/versions/008_sqlite_downgrade.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- not supported by sqlite, but should be:
--- alter TABLE tenant drop column description;
--- alter TABLE tenant drop column enabled;
--- The downgrade process will fail without valid SQL in this file
-select count(*) from tenant;
diff --git a/keystone/common/sql/migrate_repo/versions/009_normalize_identity_migration.py b/keystone/common/sql/migrate_repo/versions/009_normalize_identity_migration.py
index 517d536..4ad4db7 100644
--- a/keystone/common/sql/migrate_repo/versions/009_normalize_identity_migration.py
+++ b/keystone/common/sql/migrate_repo/versions/009_normalize_identity_migration.py
@@ -19,41 +19,30 @@ import json
 
 from sqlalchemy import MetaData, Table
 from sqlalchemy.orm import sessionmaker
 
-disabled_values = ['false', 'disabled', 'no', '0']
+
+DISABLED_VALUES = ['false', 'disabled', 'no', '0']
 
 
 def is_enabled(enabled):
-    #no explicit value means enabled
-    if enabled is None:
-        return 1
-    if enabled is str:
-        if str(enabled).lower() in disabled_values:
-            return 0
-    if enabled:
-        return 1
-    else:
-        return 0
+    # no explicit value means enabled
+    if enabled is True or enabled is None:
+        return True
+    if isinstance(enabled, basestring) and enabled.lower() in DISABLED_VALUES:
+        return False
+    return bool(enabled)
 
 
 def downgrade_user_table(meta, migrate_engine):
     user_table = Table('user', meta, autoload=True)
     maker = sessionmaker(bind=migrate_engine)
     session = maker()
-    user_data = []
-    for a_user in session.query(user_table):
-        id, name, extra, password, enabled = a_user
-        extra_parsed = json.loads(extra)
-        extra_parsed['password'] = password
-        extra_parsed['enabled'] = "%r" % enabled
-        user_data.append((password,
-                          json.dumps(extra_parsed),
-                          is_enabled(enabled), id))
-    for user in user_data:
-        session.execute("update user "
-                        "set extra = '%s' "
-                        "where id = '%s'" %
-                        user)
-
+    for user in session.query(user_table).all():
+        extra = json.loads(user.extra)
+        extra['password'] = user.password
+        extra['enabled'] = '%r' % user.enabled
+        session.execute(
+            'UPDATE `user` SET `extra`=:extra WHERE `id`=:id',
+            {'id': user.id, 'extra': json.dumps(extra)})
     session.commit()
 
 
@@ -61,21 +50,13 @@ def downgrade_tenant_table(meta, migrate_engine):
     tenant_table = Table('tenant', meta, autoload=True)
     maker = sessionmaker(bind=migrate_engine)
     session = maker()
-    tenant_data = []
-    for a_tenant in session.query(tenant_table):
-        id, name, extra, password, enabled = a_tenant
-        extra_parsed = json.loads(extra)
-        extra_parsed['description'] = description
-        extra_parsed['enabled'] = "%r" % enabled
-        tenant_data.append((password,
-                            json.dumps(extra_parsed),
-                            is_enabled(enabled), id))
-    for tenant in tenant_data:
-        session.execute("update tenant "
-                        "set extra = '%s' "
-                        "where id = '%s'" %
-                        tenant)
-
+    for tenant in session.query(tenant_table).all():
+        extra = json.loads(tenant.extra)
+        extra['description'] = tenant.description
+        extra['enabled'] = '%r' % tenant.enabled
+        session.execute(
+            'UPDATE `tenant` SET `extra`=:extra WHERE `id`=:id',
+            {'id': tenant.id, 'extra': json.dumps(extra)})
     session.commit()
 
 
@@ -84,24 +65,18 @@ def upgrade_user_table(meta, migrate_engine):
     maker = sessionmaker(bind=migrate_engine)
     session = maker()
-    new_user_data = []
-    for a_user in session.query(user_table):
-        id, name, extra, password, enabled = a_user
-        extra_parsed = json.loads(extra)
-        if 'password' in extra_parsed:
-            password = extra_parsed['password']
-            extra_parsed.pop('password')
-        if 'enabled' in extra_parsed:
-            enabled = extra_parsed['enabled']
-            extra_parsed.pop('enabled')
-        new_user_data.append((password,
-                              json.dumps(extra_parsed),
-                              is_enabled(enabled), id))
-    for new_user in new_user_data:
-        session.execute("update user "
-                        "set password = '%s', extra = '%s', enabled = '%s' "
-                        "where id = '%s'" %
-                        new_user)
+    for user in session.query(user_table).all():
+        extra = json.loads(user.extra)
+        password = extra.pop('password', None)
+        enabled = extra.pop('enabled', True)
+        session.execute(
+            'UPDATE `user` SET `password`=:password, `enabled`=:enabled, '
+            '`extra`=:extra WHERE `id`=:id',
+            {
+                'id': user.id,
+                'password': password,
+                'enabled': is_enabled(enabled),
+                'extra': json.dumps(extra)})
     session.commit()
 
 
@@ -110,24 +85,18 @@ def upgrade_tenant_table(meta, migrate_engine):
     maker = sessionmaker(bind=migrate_engine)
     session = maker()
-    new_tenant_data = []
-    for a_tenant in session.query(tenant_table):
-        id, name, extra, description, enabled = a_tenant
-        extra_parsed = json.loads(extra)
-        if 'description' in extra_parsed:
-            description = extra_parsed['description']
-            extra_parsed.pop('description')
-        if 'enabled' in extra_parsed:
-            enabled = extra_parsed['enabled']
-            extra_parsed.pop('enabled')
-        new_tenant_data.append((description,
-                                json.dumps(extra_parsed),
-                                is_enabled(enabled), id))
-    for new_tenant in new_tenant_data:
-        session.execute("update tenant "
-                        "set description = '%s', extra = '%s', enabled = '%s' "
-                        "where id = '%s'" %
-                        new_tenant)
+    for tenant in session.query(tenant_table):
+        extra = json.loads(tenant.extra)
+        description = extra.pop('description', None)
+        enabled = extra.pop('enabled', True)
+        session.execute(
+            'UPDATE `tenant` SET `enabled`=:enabled, `extra`=:extra, '
+            '`description`=:description WHERE `id`=:id',
+            {
+                'id': tenant.id,
+                'description': description,
+                'enabled': is_enabled(enabled),
+                'extra': json.dumps(extra)})
     session.commit()
diff --git a/keystone/common/sql/migrate_repo/versions/011_populate_endpoint_type.py b/keystone/common/sql/migrate_repo/versions/011_populate_endpoint_type.py
index ae41d1f..abfe728 100644
--- a/keystone/common/sql/migrate_repo/versions/011_populate_endpoint_type.py
+++ b/keystone/common/sql/migrate_repo/versions/011_populate_endpoint_type.py
@@ -50,9 +50,10 @@ def upgrade(migrate_engine):
         }
         session.execute(
             'INSERT INTO `%s` (%s) VALUES (%s)' % (
-            new_table.name,
-            ', '.join('%s' % k for k in endpoint.keys()),
-            ', '.join("'%s'" % v for v in endpoint.values())))
+                new_table.name,
+                ', '.join('%s' % k for k in endpoint.keys()),
+                ', '.join([':%s' % k for k in endpoint.keys()])),
+            endpoint)
     session.commit()
 
 
@@ -78,9 +79,10 @@ def downgrade(migrate_engine):
         try:
             session.execute(
                 'INSERT INTO `%s` (%s) VALUES (%s)' % (
-                legacy_table.name,
-                ', '.join('%s' % k for k in endpoint.keys()),
-                ', '.join("'%s'" % v for v in endpoint.values())))
+                    legacy_table.name,
+                    ', '.join('%s' % k for k in endpoint.keys()),
+                    ', '.join([':%s' % k for k in endpoint.keys()])),
+                endpoint)
         except sql.exc.IntegrityError:
             q = session.query(legacy_table)
             q = q.filter_by(id=ref.legacy_endpoint_id)
@@ -89,8 +91,7 @@ def downgrade(migrate_engine):
             extra['%surl' % ref.interface] = ref.url
 
             session.execute(
-                'UPDATE `%s` SET extra=\'%s\' WHERE id="%s"' % (
-                    legacy_table.name,
-                    json.dumps(extra),
-                    legacy_ref.id))
+                'UPDATE `%s` SET extra=:extra WHERE id=:id' % (
+                    legacy_table.name),
+                {'extra': json.dumps(extra), 'id': legacy_ref.id})
     session.commit()
diff --git a/keystone/common/utils.py b/keystone/common/utils.py
index d74da5b..2c194db 100644
--- a/keystone/common/utils.py
+++ b/keystone/common/utils.py
@@ -311,3 +311,37 @@ def setup_remote_pydev_debug():
     except:
         LOG.exception(_(error_msg))
         raise
+
+
+class LimitingReader(object):
+    """Reader to limit the size of an incoming request."""
+    def __init__(self, data, limit):
+        """
+        :param data: Underlying data object
+        :param limit: maximum number of bytes the reader should allow
+        """
+        self.data = data
+        self.limit = limit
+        self.bytes_read = 0
+
+    def __iter__(self):
+        for chunk in self.data:
+            self.bytes_read += len(chunk)
+            if self.bytes_read > self.limit:
+                raise exception.RequestTooLarge()
+            else:
+                yield chunk
+
+    def read(self, i):
+        result = self.data.read(i)
+        self.bytes_read += len(result)
+        if self.bytes_read > self.limit:
+            raise exception.RequestTooLarge()
+        return result
+
+    def read(self):
+        result = self.data.read()
+        self.bytes_read += len(result)
+        if self.bytes_read > self.limit:
+            raise exception.RequestTooLarge()
+        return result
diff --git a/keystone/config.py b/keystone/config.py
index c26a518..72fd0dc 100644
--- a/keystone/config.py
+++ b/keystone/config.py
@@ -137,6 +137,8 @@ register_str('onready')
 register_str('auth_admin_prefix', default='')
 register_str('policy_file', default='policy.json')
 register_str('policy_default_rule', default=None)
+#default max request size is 112k
+register_int('max_request_body_size', default=114688)
 
 #ssl options
 register_bool('enable', group='ssl', default=False)
diff --git a/keystone/exception.py b/keystone/exception.py
index 26697e0..2787e06 100644
--- a/keystone/exception.py
+++ b/keystone/exception.py
@@ -173,6 +173,12 @@ class Conflict(Error):
     title = 'Conflict'
 
 
+class RequestTooLarge(Error):
+    """Request is too large."""
+    code = 413
+    title = 'Request is too large.'
+
+
 class UnexpectedError(Error):
     """An unexpected error prevented the server from fulfilling your
     request.
diff --git a/keystone/identity/backends/ldap/core.py b/keystone/identity/backends/ldap/core.py
index 175ff02..130c319 100644
--- a/keystone/identity/backends/ldap/core.py
+++ b/keystone/identity/backends/ldap/core.py
@@ -1038,8 +1038,7 @@ class RoleApi(common_ldap.BaseLdap, ApiShimMixin):
 
     def delete(self, id):
         conn = self.get_connection()
-        query = '(&(objectClass=%s)(%s=%s))' % (self.object_class,
-                                                self.id_attr, id)
+        query = '(objectClass=%s)' % self.object_class
         tenant_dn = self.tenant_api.tree_dn
         try:
             for role_dn, _ in conn.search_s(tenant_dn,
diff --git a/keystone/middleware/core.py b/keystone/middleware/core.py
index a49f743..24495c9 100644
--- a/keystone/middleware/core.py
+++ b/keystone/middleware/core.py
@@ -14,7 +14,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import webob.dec
+
 from keystone.common import serializer
+from keystone.common import utils
 from keystone.common import wsgi
 from keystone import config
 from keystone import exception
@@ -164,3 +167,21 @@ class NormalizingFilter(wsgi.Middleware):
         # Rewrites path to root if no path is given.
         elif not request.environ['PATH_INFO']:
             request.environ['PATH_INFO'] = '/'
+
+
+class RequestBodySizeLimiter(wsgi.Middleware):
+    """Limit the size of an incoming request."""
+
+    def __init__(self, *args, **kwargs):
+        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+
+        if req.content_length > CONF.max_request_body_size:
+            raise exception.RequestTooLarge()
+        if req.content_length is None and req.is_body_readable:
+            limiter = utils.LimitingReader(req.body_file,
+                                           CONF.max_request_body_size)
+            req.body_file = limiter
+        return self.application
diff --git a/tests/backend_sql.conf b/tests/backend_sql.conf
index 79bb340..f42eba6 100644
--- a/tests/backend_sql.conf
+++ b/tests/backend_sql.conf
@@ -1,5 +1,9 @@
 [sql]
 connection = sqlite://
+#To Test MySQL:
+#connection = mysql://root:keystone@localhost/keystone?charset=utf8
+#To Test PostgreSQL:
+#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
 idle_timeout = 200
 
 [identity]
diff --git a/tests/test_backend.py b/tests/test_backend.py
index 9e0bfff..80b709d 100644
--- a/tests/test_backend.py
+++ b/tests/test_backend.py
@@ -822,18 +822,13 @@ class IdentityTests(object):
 
     def test_delete_role_check_role_grant(self):
         role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
-        alt_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
         self.identity_api.create_role(role['id'], role)
-        self.identity_api.create_role(alt_role['id'], alt_role)
         self.identity_api.add_role_to_user_and_tenant(
             self.user_foo['id'], self.tenant_bar['id'], role['id'])
-        self.identity_api.add_role_to_user_and_tenant(
-            self.user_foo['id'], self.tenant_bar['id'], alt_role['id'])
         self.identity_api.delete_role(role['id'])
         roles_ref = self.identity_api.get_roles_for_user_and_tenant(
             self.user_foo['id'], self.tenant_bar['id'])
         self.assertNotIn(role['id'], roles_ref)
-        self.assertIn(alt_role['id'], roles_ref)
 
     def test_create_tenant_doesnt_modify_passed_in_dict(self):
         new_tenant = {'id': 'tenant_id', 'name': 'new_tenant'}
diff --git a/tests/test_sizelimit.py b/tests/test_sizelimit.py
new file mode 100644
index 0000000..aec57ec
--- /dev/null
+++ b/tests/test_sizelimit.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2013 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from keystone import config
+from keystone import exception
+from keystone import middleware
+from keystone import test
+
+CONF = config.CONF
+MAX_REQUEST_BODY_SIZE = CONF.max_request_body_size
+
+
+class TestRequestBodySizeLimiter(test.TestCase):
+
+    def setUp(self):
+        super(TestRequestBodySizeLimiter, self).setUp()
+
+        @webob.dec.wsgify()
+        def fake_app(req):
+            return webob.Response(req.body)
+
+        self.middleware = middleware.RequestBodySizeLimiter(fake_app)
+        self.request = webob.Request.blank('/', method='POST')
+
+    def test_content_length_acceptable(self):
+        self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE
+        self.request.body = "0" * MAX_REQUEST_BODY_SIZE
+        response = self.request.get_response(self.middleware)
+        self.assertEqual(response.status_int, 200)
+
+    def test_content_length_too_large(self):
+        self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1
+        self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
+        self.assertRaises(exception.RequestTooLarge,
+                          self.request.get_response,
+                          self.middleware)
+
+    def test_request_too_large_no_content_length(self):
+        self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1)
+        self.request.headers['Content-Length'] = None
+        self.assertRaises(exception.RequestTooLarge,
+                          self.request.get_response,
+                          self.middleware)
diff --git a/tests/test_sql_upgrade.py b/tests/test_sql_upgrade.py
index d6fb52e..88eaaa8 100644
--- a/tests/test_sql_upgrade.py
+++ b/tests/test_sql_upgrade.py
@@ -13,7 +13,18 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-
+"""
+To run these tests against a live database:
+1. Modify the file `tests/backend_sql.conf` to use the connection for your
+   live database
+2. Set up a blank, live database.
+3. run the tests using
+   ./run_tests.sh -N test_sql_upgrade
+WARNING::
+   Your database will be wiped.
+   Do not do this against a Database with valuable data as
+   all data will be lost.
+""" import copy import json import uuid @@ -53,7 +64,14 @@ class SqlUpgradeTests(test.TestCase): self.schema = versioning_api.ControlledSchema.create(self.engine, self.repo_path, 0) + # auto-detect the highest available schema version in the migrate_repo + self.max_version = self.schema.repository.version().version + def tearDown(self): + table = sqlalchemy.Table("migrate_version", self.metadata, + autoload=True) + self.downgrade(0) + table.drop(self.engine, checkfirst=True) super(SqlUpgradeTests, self).tearDown() def test_blank_db_to_start(self): @@ -63,6 +81,20 @@ class SqlUpgradeTests(test.TestCase): version = migration.db_version() self.assertEqual(version, 0, "DB is at version 0") + def test_two_steps_forward_one_step_back(self): + """You should be able to cleanly undo a re-apply all upgrades. + + Upgrades are run in the following order:: + + 0 -> 1 -> 0 -> 1 -> 2 -> 1 -> 2 -> 3 -> 2 -> 3 ... + ^---------^ ^---------^ ^---------^ + + """ + for x in range(1, self.max_version + 1): + self.upgrade(x) + self.downgrade(x - 1) + self.upgrade(x) + def assertTableColumns(self, table_name, expected_cols): """Asserts that the table contains the expected set of columns.""" table = self.select_table(table_name) @@ -118,6 +150,9 @@ class SqlUpgradeTests(test.TestCase): session.commit() def test_downgrade_9_to_7(self): + self.upgrade(7) + self.populate_user_table() + self.populate_tenant_table() self.upgrade(9) self.downgrade(7) @@ -270,7 +305,7 @@ class SqlUpgradeTests(test.TestCase): ', '.join("'%s'" % v for v in d.values()))) def test_downgrade_to_0(self): - self.upgrade(13) + self.upgrade(self.max_version) self.downgrade(0) for table_name in ["user", "token", "role", "user_tenant_membership", "metadata"]: