Merge lp:~james-page/charm-helpers/lp1607961 into lp:charm-helpers

Proposed by James Page
Status: Merged
Merged at revision: 618
Proposed branch: lp:~james-page/charm-helpers/lp1607961
Merge into: lp:charm-helpers
Diff against target: 41 lines (+13/-0)
2 files modified
charmhelpers/contrib/storage/linux/ceph.py (+6/-0)
tests/contrib/storage/test_linux_ceph.py (+7/-0)
To merge this branch: bzr merge lp:~james-page/charm-helpers/lp1607961
Reviewer: Liam Young (community)
Status: Approve
Review via email: mp+301753@code.launchpad.net

Description of the change

Ensure that a minimum PG count of 2 is used in small OSD configurations for pools with very low weights.
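For context, the placement group count is derived from the target PGs per OSD, the number of OSDs, the pool's percentage weight, and the replica count; on a very small cluster with a very low weight the integer division can yield 0 or 1, which gives no useful data distribution. Below is a rough, illustrative sketch of that calculation and the new floor. It mirrors the variable names used in get_pgs but is not the charm-helpers API; estimate_pgs is a hypothetical helper for this example only.

DEFAULT_PGS_PER_OSD_TARGET = 100
DEFAULT_MINIMUM_PGS = 2

def estimate_pgs(osd_count, pool_size, percent_data,
                 target_pgs_per_osd=DEFAULT_PGS_PER_OSD_TARGET):
    # percent_data is the pool's expected share of cluster data (0-100)
    percent_data /= 100.0
    num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
    # Clamp to a sane minimum so tiny pools still get some distribution
    return max(int(num_pg), DEFAULT_MINIMUM_PGS)

# 3 OSDs, 3 replicas, pool weighted at 0.1% of cluster data:
# the raw result is 0, so the minimum of 2 applies
print(estimate_pgs(osd_count=3, pool_size=3, percent_data=0.1))  # -> 2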

Liam Young (gnuoy) :
review: Approve

Preview Diff

=== modified file 'charmhelpers/contrib/storage/linux/ceph.py'
--- charmhelpers/contrib/storage/linux/ceph.py 2016-07-13 18:56:39 +0000
+++ charmhelpers/contrib/storage/linux/ceph.py 2016-08-02 09:12:06 +0000
@@ -87,6 +87,7 @@
 DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
+DEFAULT_MINIMUM_PGS = 2
 
 
 def validator(value, valid_type, valid_range=None):
@@ -266,6 +267,11 @@
         target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
         num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
 
+        # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
+        # reasonable data distribution in minimal OSD configurations
+        if num_pg < DEFAULT_MINIMUM_PGS:
+            num_pg = DEFAULT_MINIMUM_PGS
+
         # The CRUSH algorithm has a slight optimization for placement groups
         # with powers of 2 so find the nearest power of 2. If the nearest
         # power of 2 is more than 25% below the original value, the next
=== modified file 'tests/contrib/storage/test_linux_ceph.py'
--- tests/contrib/storage/test_linux_ceph.py 2016-07-13 18:56:39 +0000
+++ tests/contrib/storage/test_linux_ceph.py 2016-08-02 09:12:06 +0000
@@ -271,6 +271,13 @@
         pg_num = p.get_pgs(pool_size=3, percent_data=100)
         self.assertEquals(512, pg_num)
 
+        # Test small % weight with minimal OSD count (3)
+        get_osds.return_value = range(1, 3)
+        self.test_config.set('expected-osd-count', None)
+        self.test_config.set('pgs-per-osd', None)
+        pg_num = p.get_pgs(pool_size=3, percent_data=0.1)
+        self.assertEquals(2, pg_num)
+
     @patch.object(ceph_utils, 'get_osds')
     def test_replicated_pool_create_old_ceph(self, get_osds):
         get_osds.return_value = None
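The context lines in the first hunk refer to the power-of-2 rounding that runs after the new minimum check: the count is rounded to the nearest power of 2, and if that power falls more than 25% below the computed value the next power up is used. The following is a simplified sketch of that rule under those stated assumptions, not the exact charm-helpers code; note that the new minimum of 2 is already a power of 2, so it passes through unchanged.

import math

def round_to_power_of_two(num_pg):
    # Largest power of 2 not exceeding the requested PG count
    nearest = 2 ** int(math.floor(math.log(num_pg, 2)))
    # If that is more than 25% below the requested value, use the next power up
    if (num_pg - nearest) > (num_pg * 0.25):
        nearest *= 2
    return int(nearest)

print(round_to_power_of_two(200))  # 256: 128 is more than 25% below 200
print(round_to_power_of_two(2))    # 2: the clamped minimum is already a power of 2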
