Merge lp:~verterok/ubuntuone-client/vm-refactor into lp:ubuntuone-client
| Status: | Merged |
|---|---|
| Approved by: | Rick McBride on 2010-01-27 |
| Approved revision: | not available |
| Merged at revision: | not available |
| Proposed branch: | lp:~verterok/ubuntuone-client/vm-refactor |
| Merge into: | lp:ubuntuone-client |
| Prerequisite: | lp:~verterok/ubuntuone-client/vm-pre-refactor-api-3 |
| Diff against target: | 2464 lines (+1161/-420), 9 files modified: tests/syncdaemon/test_dbus.py (+49/-34), tests/syncdaemon/test_eq_inotify.py (+108/-21), tests/syncdaemon/test_eventqueue.py (+3/-1), tests/syncdaemon/test_tools.py (+2/-2), tests/syncdaemon/test_vm.py (+562/-218), ubuntuone/syncdaemon/dbus_interface.py (+6/-2), ubuntuone/syncdaemon/event_queue.py (+115/-55), ubuntuone/syncdaemon/tools.py (+6/-6), ubuntuone/syncdaemon/volume_manager.py (+310/-81) |
| To merge this branch: | bzr merge lp:~verterok/ubuntuone-client/vm-refactor |
| Related bugs: | |
| Related blueprints: | |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Zachery Bir (community) | | | Approve on 2010-01-27 |
| Facundo Batista | | 2010-01-26 | Approve on 2010-01-26 |
Commit Message
VolumeManager Volume hierarchy refactor and metadata migration to new version
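Based on the preview diff below, the refactor replaces the flat share records with a small Volume class hierarchy (Root, Share, Shared, UDF) keyed by `volume_id` instead of `id`, with `node_id` replacing the old `subtree` field. The following is only an illustrative sketch of such a hierarchy: the class names and the UDF constructor argument order are taken from the tests in the diff, while the defaults and docstrings are assumptions, not the actual volume_manager.py implementation.

```python
# Illustrative sketch only -- not the actual ubuntuone-client implementation.
# Class names (Share, Shared, UDF, Root) and the UDF constructor order come
# from the diff; the defaults and docstrings are assumptions.

class Volume(object):
    """Common base for everything the VolumeManager tracks."""

    def __init__(self, volume_id, node_id, path):
        self.volume_id = volume_id   # previously exposed as 'id'
        self.node_id = node_id       # previously exposed as 'subtree' for shares
        self.path = path


class Share(Volume):
    """A folder somebody else shared with this user."""

    def __init__(self, volume_id=None, node_id=None, path=None,
                 access_level='Read', accepted=False, other_username=None):
        super(Share, self).__init__(volume_id, node_id, path)
        self.access_level = access_level
        self.accepted = accepted
        self.other_username = other_username


class Shared(Share):
    """A folder this user shared with somebody else."""


class UDF(Volume):
    """A User Defined Folder synced from an arbitrary path under $HOME."""

    def __init__(self, volume_id, node_id, suggested_path, path,
                 subscribed=True):
        super(UDF, self).__init__(volume_id, node_id, path)
        self.suggested_path = suggested_path
        self.subscribed = subscribed


class Root(Volume):
    """The root volume: the Ubuntu One directory itself."""
```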
Description of the Change
Guillermo Gonzalez (verterok) wrote:
- 328. By Guillermo Gonzalez on 2010-01-26: merge with trunk
- 329. By Guillermo Gonzalez on 2010-01-26
- 330. By Guillermo Gonzalez on 2010-01-26: fix tools.show_folders function to use volume_id instead of id
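The revision 330 fix is essentially a key rename in the dictionaries the daemon returns over D-Bus: `id` became `volume_id`. A hedged sketch of what such a show_folders helper looks like after the rename; the dict keys match the diff, but this simplified function body is an assumption, not the code from ubuntuone/syncdaemon/tools.py.

```python
def show_folders(folders):
    """Print one line per UDF dict returned by the syncdaemon.

    Sketch only: after the refactor the dicts carry 'volume_id'
    (the old code read folder['id'] and would now raise KeyError).
    """
    for folder in folders:
        print "%s subscribed=%s %s" % (
            folder['volume_id'], folder['subscribed'], folder['path'])
```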
Facundo Batista (facundo) wrote:
=======
[ERROR]: tests.syncdaemo
Traceback (most recent call last):
File "/usr/lib/
result = result.
File "/usr/lib/
return g.throw(self.type, self.value, self.tb)
File "/home/
yield d
File "/usr/lib/
self.result = callback(
File "/home/
d.addCallba
File "/home/
out.
exceptions.
-------
Guillermo Gonzalez (verterok) wrote:
Already pushed and fixed in revno: 330, thanks!
Facundo Batista (facundo) wrote:
Ok, that is already fixed, and all the tests went ok!
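Most of the new tests below exercise the metadata migration: the volume manager keeps a `.version` file under the vm data directory and upgrades older layouts until they match CURRENT_METADATA_VERSION. A minimal sketch of that version bookkeeping, mirroring the set_md_version/check_version helpers in the tests; the version value and directory layout here are assumptions for illustration only.

```python
import os

# Placeholder value; the real CURRENT_METADATA_VERSION is defined elsewhere.
CURRENT_METADATA_VERSION = '6'

def get_md_version(vm_data_dir):
    """Return the stored metadata version, or None if there is no .version file."""
    version_file = os.path.join(vm_data_dir, '.version')
    if not os.path.exists(version_file):
        return None
    with open(version_file, 'r') as fd:
        return fd.read().strip() or None

def set_md_version(vm_data_dir, md_version):
    """Write md_version, creating the vm data directory if needed."""
    if not os.path.exists(vm_data_dir):
        os.makedirs(vm_data_dir)
    with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:
        fd.write(md_version)
```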
Preview Diff
| 1 | === modified file 'tests/syncdaemon/test_dbus.py' |
| 2 | --- tests/syncdaemon/test_dbus.py 2010-01-25 22:28:46 +0000 |
| 3 | +++ tests/syncdaemon/test_dbus.py 2010-01-26 20:35:29 +0000 |
| 4 | @@ -36,7 +36,7 @@ |
| 5 | DBUS_IFACE_FOLDERS_NAME, |
| 6 | EventListener, |
| 7 | ) |
| 8 | -from ubuntuone.syncdaemon.volume_manager import Share, UDF |
| 9 | +from ubuntuone.syncdaemon.volume_manager import Share, Shared, UDF |
| 10 | from ubuntuone.syncdaemon.tools import DBusClient |
| 11 | from ubuntuone.syncdaemon import event_queue, states, main, config |
| 12 | from contrib.testing.testcase import ( |
| 13 | @@ -500,21 +500,23 @@ |
| 14 | access_level='Read', accepted=False)) |
| 15 | client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME) |
| 16 | d = defer.Deferred() |
| 17 | - def shares_handler(shares): |
| 18 | + def check(shares): |
| 19 | """ handle get_shares reply """ |
| 20 | self.assertEquals(1, len(shares)) |
| 21 | for share in shares: |
| 22 | - if share['id'] == '': |
| 23 | - self.assertEquals('', str(share['id'])) |
| 24 | + if share['volume_id'] == '': |
| 25 | + self.assertEquals('', str(share['volume_id'])) |
| 26 | self.assertEquals(self.root_dir, str(share['path'])) |
| 27 | self.assertEquals('Modify', str(share['access_level'])) |
| 28 | self.assertEquals('False', str(share['accepted'])) |
| 29 | else: |
| 30 | - self.assertEquals('share_id', str(share['id'])) |
| 31 | + self.assertEquals('share_id', str(share['volume_id'])) |
| 32 | self.assertEquals(share_path, str(share['path'])) |
| 33 | self.assertEquals('Read', str(share['access_level'])) |
| 34 | self.assertEquals('False', str(share['accepted'])) |
| 35 | - d.callback(True) |
| 36 | + |
| 37 | + def shares_handler(shares): |
| 38 | + d.callback(shares) |
| 39 | |
| 40 | client.call_method('get_shares', reply_handler=shares_handler, |
| 41 | error_handler=self.error_handler) |
| 42 | @@ -539,7 +541,7 @@ |
| 43 | def check(result): |
| 44 | """the async checker""" |
| 45 | self.assertEquals('Yes', result['answer']) |
| 46 | - self.assertEquals('share_id', result['share_id']) |
| 47 | + self.assertEquals('share_id', result['volume_id']) |
| 48 | self.assertEquals(True, self.main.vm.shares['share_id'].accepted) |
| 49 | |
| 50 | d.addCallback(check) |
| 51 | @@ -563,7 +565,7 @@ |
| 52 | def check(result): |
| 53 | """the async checker""" |
| 54 | self.assertEquals('No', result['answer']) |
| 55 | - self.assertEquals('share_id', result['share_id']) |
| 56 | + self.assertEquals('share_id', result['volume_id']) |
| 57 | self.assertEquals(False, self.main.vm.shares['share_id'].accepted) |
| 58 | |
| 59 | d.addCallback(check) |
| 60 | @@ -648,8 +650,8 @@ |
| 61 | self.assertEquals(1, len(results)) |
| 62 | shared = results[0] |
| 63 | self.assertEquals(a_dir, str(shared['path'])) |
| 64 | - self.assertEquals('node_id', str(shared['subtree'])) |
| 65 | - self.assertEquals('share_id', str(shared['id'])) |
| 66 | + self.assertEquals('node_id', str(shared['node_id'])) |
| 67 | + self.assertEquals('share_id', str(shared['volume_id'])) |
| 68 | self.assertEquals('View', str(shared['access_level'])) |
| 69 | d.callback(True) |
| 70 | client.call_method('get_shared', |
| 71 | @@ -680,14 +682,15 @@ |
| 72 | self.assertEquals(1, len(results)) |
| 73 | shared = results[0] |
| 74 | self.assertEquals('', str(shared['path'])) |
| 75 | - self.assertEquals('node_id', str(shared['subtree'])) |
| 76 | - self.assertEquals('share_id', str(shared['id'])) |
| 77 | + self.assertEquals('node_id', str(shared['node_id'])) |
| 78 | + self.assertEquals('share_id', str(shared['volume_id'])) |
| 79 | self.assertEquals('View', str(shared['access_level'])) |
| 80 | d.callback(True) |
| 81 | client.call_method('get_shared', |
| 82 | reply_handler=reply_handler, |
| 83 | error_handler=self.error_handler) |
| 84 | return d |
| 85 | + |
| 86 | def test_refresh_shares(self): |
| 87 | """ Just check that refresh_shares method API works. """ |
| 88 | client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME) |
| 89 | @@ -800,7 +803,7 @@ |
| 90 | a_dir = os.path.join(self.root_dir, u'ñoño'.encode('utf-8')) |
| 91 | self.fs_manager.create(a_dir, "", is_dir=True) |
| 92 | self.fs_manager.set_node_id(a_dir, "node_id") |
| 93 | - share = Share(path=a_dir, volume_id='shared_id', name=u'ñoño_shared', |
| 94 | + share = Shared(path=a_dir, volume_id='shared_id', name=u'ñoño_shared', |
| 95 | access_level='View', other_username=u'test_username', |
| 96 | node_id='node_id') |
| 97 | self.main.vm.add_shared(share) |
| 98 | @@ -812,8 +815,8 @@ |
| 99 | self.assertEquals(1, len(results)) |
| 100 | shared = results[0] |
| 101 | self.assertEquals(a_dir, shared['path'].encode('utf-8')) |
| 102 | - self.assertEquals('node_id', str(shared['subtree'])) |
| 103 | - self.assertEquals('shared_id', str(shared['id'])) |
| 104 | + self.assertEquals('node_id', str(shared['node_id'])) |
| 105 | + self.assertEquals('shared_id', str(shared['volume_id'])) |
| 106 | self.assertEquals('View', str(shared['access_level'])) |
| 107 | |
| 108 | d.addCallback(check) |
| 109 | @@ -1072,14 +1075,15 @@ |
| 110 | u'visible_name', 'Write') |
| 111 | |
| 112 | d = defer.Deferred() |
| 113 | - def share_handler(share): |
| 114 | + def check(share): |
| 115 | """ handler for ShareChanged signal. """ |
| 116 | - self.assertEquals('a_share_id', str(share['id'])) |
| 117 | + self.assertEquals('a_share_id', str(share['volume_id'])) |
| 118 | self.assertEquals(share_path, str(share['path'])) |
| 119 | self.assertEquals('Write', str(share['access_level'])) |
| 120 | - self.assertEquals('False', str(share['accepted'])) |
| 121 | - d.callback(True) |
| 122 | - |
| 123 | + self.assertEquals('', str(share['accepted'])) |
| 124 | + d.addCallback(check) |
| 125 | + def share_handler(result): |
| 126 | + d.callback(result) |
| 127 | match = self.bus.add_signal_receiver(share_handler, |
| 128 | signal_name='ShareChanged') |
| 129 | self.signal_receivers.add(match) |
| 130 | @@ -1089,25 +1093,36 @@ |
| 131 | def test_share_deleted(self): |
| 132 | """ Test the ShareDeleted signal. """ |
| 133 | share_path = os.path.join(self.main.shares_dir, 'share') |
| 134 | - self.main.vm.add_share(Share(path=share_path, volume_id='a_share_id', |
| 135 | - access_level='Read', accepted=False)) |
| 136 | share_holder = NotifyShareHolder.from_params('a_share_id', 'subtree', |
| 137 | u'fake_share', |
| 138 | u'test_username', |
| 139 | u'visible_name', 'Read') |
| 140 | |
| 141 | + self.main.vm.add_share(Share.from_notify_holder(share_holder, share_path)) |
| 142 | d = defer.Deferred() |
| 143 | def share_handler(share_dict): |
| 144 | """ handler for ShareDeletedsignal. """ |
| 145 | - expected_dict = dict(share_id='a_share_id', |
| 146 | - subtree='subtree', |
| 147 | - share_name=u'fake_share', |
| 148 | - from_username=u'test_username', |
| 149 | - from_visible_name=u'visible_name', |
| 150 | + d.callback(share_dict) |
| 151 | + |
| 152 | + match = self.bus.add_signal_receiver(share_handler, |
| 153 | + signal_name='ShareDeleted') |
| 154 | + self.signal_receivers.add(match) |
| 155 | + |
| 156 | + def check(share_dict): |
| 157 | + """Check the result.""" |
| 158 | + expected_dict = dict(volume_id='a_share_id', |
| 159 | + node_id='subtree', |
| 160 | + name=u'fake_share', |
| 161 | + other_username=u'test_username', |
| 162 | + other_visible_name=u'visible_name', |
| 163 | + free_bytes='', |
| 164 | + path=share_path, |
| 165 | + accepted='', |
| 166 | access_level='Read') |
| 167 | + expected_dict['type'] = 'Share' |
| 168 | for k, v in share_dict.items(): |
| 169 | self.assertEquals(expected_dict[str(k)], str(v)) |
| 170 | - d.callback(True) |
| 171 | + d.addCallback(check) |
| 172 | |
| 173 | match = self.bus.add_signal_receiver(share_handler, |
| 174 | signal_name='ShareDeleted') |
| 175 | @@ -1495,7 +1510,7 @@ |
| 176 | udf_dict = self.dbus_iface.folders._get_udf_dict(udf) |
| 177 | # check the path it's unicode |
| 178 | self.assertEquals(udf_dict['path'], udf.path.decode('utf-8')) |
| 179 | - self.assertEquals(udf_dict['id'], udf.volume_id) |
| 180 | + self.assertEquals(udf_dict['volume_id'], udf.id) |
| 181 | self.assertEquals(udf_dict['suggested_path'], udf.suggested_path) |
| 182 | self.assertEquals(udf_dict['node_id'], udf.node_id) |
| 183 | self.assertFalse(udf_dict['subscribed']) |
| 184 | @@ -1684,13 +1699,13 @@ |
| 185 | d = defer.Deferred() |
| 186 | def delete_volume(path): |
| 187 | """Fake delete_volume""" |
| 188 | - self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=udf.volume_id) |
| 189 | + self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=udf.id) |
| 190 | self.main.action_q.delete_volume = delete_volume |
| 191 | def deleted_handler(info): |
| 192 | """FolderDeleted handler.""" |
| 193 | self.assertRaises(KeyError, self.main.fs.get_by_path, |
| 194 | info['path'].decode('utf-8')) |
| 195 | - self.assertRaises(KeyError, self.main.vm.get_volume, info['id']) |
| 196 | + self.assertRaises(KeyError, self.main.vm.get_volume, info['volume_id']) |
| 197 | d.callback(True) |
| 198 | match = self.bus.add_signal_receiver(deleted_handler, |
| 199 | signal_name='FolderDeleted') |
| 200 | @@ -1719,7 +1734,7 @@ |
| 201 | self.main.action_q.delete_volume = delete_volume |
| 202 | def deleted_error_handler(info, error): |
| 203 | """FolderDeleteError handler""" |
| 204 | - self.assertEquals(info['id'], udf.volume_id) |
| 205 | + self.assertEquals(info['volume_id'], udf.volume_id) |
| 206 | self.assertEquals(error, "I'm broken") |
| 207 | d.callback(True) |
| 208 | match = self.bus.add_signal_receiver(deleted_error_handler, |
| 209 | @@ -1821,7 +1836,7 @@ |
| 210 | """FolderDeleted handler.""" |
| 211 | self.assertRaises(KeyError, self.main.fs.get_by_path, |
| 212 | info['path'].decode('utf-8')) |
| 213 | - self.assertRaises(KeyError, self.main.vm.get_volume, info['id']) |
| 214 | + self.assertRaises(KeyError, self.main.vm.get_volume, info['volume_id']) |
| 215 | d.callback(True) |
| 216 | match = self.bus.add_signal_receiver(deleted_handler, |
| 217 | signal_name='ShareDeleted') |
| 218 | @@ -1852,7 +1867,7 @@ |
| 219 | self.main.action_q.delete_volume = delete_volume |
| 220 | def deleted_error_handler(info, error): |
| 221 | """FolderDeleteError handler""" |
| 222 | - self.assertEquals(info['id'], share.volume_id) |
| 223 | + self.assertEquals(info['volume_id'], share.volume_id) |
| 224 | self.assertEquals(error, "I'm broken") |
| 225 | d.callback(True) |
| 226 | match = self.bus.add_signal_receiver(deleted_error_handler, |
| 227 | |
| 228 | === modified file 'tests/syncdaemon/test_eq_inotify.py' |
| 229 | --- tests/syncdaemon/test_eq_inotify.py 2010-01-26 19:15:10 +0000 |
| 230 | +++ tests/syncdaemon/test_eq_inotify.py 2010-01-26 20:35:29 +0000 |
| 231 | @@ -33,31 +33,104 @@ |
| 232 | class WatchTests(BaseEQTestCase): |
| 233 | """Test the EQ API to add and remove watchs.""" |
| 234 | |
| 235 | - def test_add_watch(self): |
| 236 | - """Test that watchs can be added.""" |
| 237 | + def _create_udf(self, path): |
| 238 | + """Create an UDF and returns it and the volume""" |
| 239 | + os.makedirs(path) |
| 240 | + udf = volume_manager.UDF("vol_id", "node_id", path, path, True) |
| 241 | + self.vm.add_udf(udf) |
| 242 | + |
| 243 | + def test_add_general_watch(self): |
| 244 | + """Test that general watchs can be added.""" |
| 245 | # we should have what we asked for |
| 246 | self.eq.inotify_add_watch(self.root_dir) |
| 247 | - # pylint: disable-msg=W0212 |
| 248 | - self.assertTrue(self.root_dir in self.eq._watchs) |
| 249 | - |
| 250 | - # we shouldn't have other stuff |
| 251 | - self.assertTrue("not-added-dir" not in self.eq._watchs) |
| 252 | - |
| 253 | - def test_rm_watch(self): |
| 254 | - """Test that watchs can be removed.""" |
| 255 | - # remove what we added |
| 256 | + |
| 257 | + # check only added dir in watchs |
| 258 | + # pylint: disable-msg=W0212 |
| 259 | + self.assertTrue(self.root_dir in self.eq._general_watchs) |
| 260 | + self.assertTrue("not-added-dir" not in self.eq._general_watchs) |
| 261 | + |
| 262 | + # nothing in the udf ancestors watch |
| 263 | + self.assertEqual(self.eq._ancestors_watchs, {}) |
| 264 | + |
| 265 | + def test_add_watch_on_udf_ancestor(self): |
| 266 | + """Test that ancestors watchs can be added.""" |
| 267 | + # create the udf and add the watch |
| 268 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 269 | + self._create_udf(path_udf) |
| 270 | + path_ancestor = os.path.join(self.home_dir, "path") |
| 271 | + self.eq.inotify_add_watch(path_ancestor) |
| 272 | + |
| 273 | + # check only added dir in watchs |
| 274 | + # pylint: disable-msg=W0212 |
| 275 | + self.assertTrue(path_ancestor in self.eq._ancestors_watchs) |
| 276 | + self.assertTrue("not-added-dir" not in self.eq._ancestors_watchs) |
| 277 | + |
| 278 | + # nothing in the general watch |
| 279 | + self.assertEqual(self.eq._general_watchs, {}) |
| 280 | + |
| 281 | + def test_add_watch_on_udf_exact(self): |
| 282 | + """Test adding the watch exactly on UDF.""" |
| 283 | + # create the udf and add the watch |
| 284 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 285 | + self._create_udf(path_udf) |
| 286 | + self.eq.inotify_add_watch(path_udf) |
| 287 | + |
| 288 | + # pylint: disable-msg=W0212 |
| 289 | + self.assertTrue(path_udf in self.eq._general_watchs) |
| 290 | + self.assertEqual(self.eq._ancestors_watchs, {}) |
| 291 | + |
| 292 | + def test_add_watch_on_udf_child(self): |
| 293 | + """Test adding the watch inside UDF.""" |
| 294 | + # create the udf and add the watch |
| 295 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 296 | + self._create_udf(path_udf) |
| 297 | + path_ancestor = os.path.join(self.home_dir, "path/to/UDF/inside") |
| 298 | + os.mkdir(path_ancestor) |
| 299 | + self.eq.inotify_add_watch(path_ancestor) |
| 300 | + |
| 301 | + # pylint: disable-msg=W0212 |
| 302 | + self.assertTrue(path_ancestor in self.eq._general_watchs) |
| 303 | + self.assertEqual(self.eq._ancestors_watchs, {}) |
| 304 | + |
| 305 | + def test_rm_watch_wrong(self): |
| 306 | + """Test that general watchs can be removed.""" |
| 307 | + # add two types of watchs |
| 308 | self.eq.inotify_add_watch(self.root_dir) |
| 309 | - self.eq.inotify_rm_watch(self.root_dir) |
| 310 | - # pylint: disable-msg=W0212 |
| 311 | - self.assertTrue(self.root_dir not in self.eq._watchs) |
| 312 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 313 | + self._create_udf(path_udf) |
| 314 | + path_ancestor = os.path.join(self.home_dir, "path") |
| 315 | + self.eq.inotify_add_watch(path_ancestor) |
| 316 | |
| 317 | # remove different stuff |
| 318 | - self.eq.inotify_add_watch(self.root_dir) |
| 319 | self.assertRaises(ValueError, |
| 320 | self.eq.inotify_rm_watch, "not-added-dir") |
| 321 | |
| 322 | - def test_has_watch(self): |
| 323 | - """Test that a path is watched.""" |
| 324 | + def test_rm_watch_general(self): |
| 325 | + """Test that general watchs can be removed.""" |
| 326 | + # remove what we added |
| 327 | + self.eq.inotify_add_watch(self.root_dir) |
| 328 | + self.eq.inotify_rm_watch(self.root_dir) |
| 329 | + |
| 330 | + # pylint: disable-msg=W0212 |
| 331 | + self.assertEqual(self.eq._general_watchs, {}) |
| 332 | + self.assertEqual(self.eq._ancestors_watchs, {}) |
| 333 | + |
| 334 | + def test_rm_watch_ancestor(self): |
| 335 | + """Test that ancestor watchs can be removed.""" |
| 336 | + # create the udf and add the watch |
| 337 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 338 | + self._create_udf(path_udf) |
| 339 | + path_ancestor = os.path.join(self.home_dir, "path") |
| 340 | + self.eq.inotify_add_watch(path_ancestor) |
| 341 | + |
| 342 | + # remove what we added |
| 343 | + self.eq.inotify_rm_watch(path_ancestor) |
| 344 | + # pylint: disable-msg=W0212 |
| 345 | + self.assertEqual(self.eq._general_watchs, {}) |
| 346 | + self.assertEqual(self.eq._ancestors_watchs, {}) |
| 347 | + |
| 348 | + def test_has_watch_general(self): |
| 349 | + """Test that a general path is watched.""" |
| 350 | self.assertFalse(self.eq.inotify_has_watch(self.root_dir)) |
| 351 | |
| 352 | # add |
| 353 | @@ -68,6 +141,24 @@ |
| 354 | self.eq.inotify_rm_watch(self.root_dir) |
| 355 | self.assertFalse(self.eq.inotify_has_watch(self.root_dir)) |
| 356 | |
| 357 | + def test_has_watch_ancestor(self): |
| 358 | + """Test that an ancestor path is watched.""" |
| 359 | + path_udf = os.path.join(self.home_dir, "path/to/UDF") |
| 360 | + self._create_udf(path_udf) |
| 361 | + path_ancestor = os.path.join(self.home_dir, "path") |
| 362 | + |
| 363 | + self.assertFalse(self.eq.inotify_has_watch(path_ancestor)) |
| 364 | + |
| 365 | + # add |
| 366 | + # create the udf and add the watch |
| 367 | + self.eq.inotify_add_watch(path_ancestor) |
| 368 | + self.assertTrue(self.eq.inotify_has_watch(path_ancestor)) |
| 369 | + |
| 370 | + # remove |
| 371 | + self.eq.inotify_rm_watch(path_ancestor) |
| 372 | + self.assertFalse(self.eq.inotify_has_watch(path_ancestor)) |
| 373 | + |
| 374 | + |
| 375 | class DynamicHitMe(object): |
| 376 | """Helper class to test a sequence of signals.""" |
| 377 | |
| 378 | @@ -1157,7 +1248,6 @@ |
| 379 | |
| 380 | @param msg: A string describing the failure that's included in the |
| 381 | exception. |
| 382 | - |
| 383 | """ |
| 384 | if not first == second: |
| 385 | if msg is None: |
| 386 | @@ -1246,9 +1336,6 @@ |
| 387 | suggested_path, path, True) |
| 388 | other_ancestors = other_udf.ancestors |
| 389 | |
| 390 | - # pylint: disable-msg=W0212 |
| 391 | - assert not self.eq._processor._is_udf_ancestor(path) |
| 392 | - |
| 393 | os.makedirs(path) |
| 394 | # every ancestor has a watch already, added by LocalRescan. Copy that. |
| 395 | self.eq.inotify_add_watch(other_udf.path) |
| 396 | |
| 397 | === modified file 'tests/syncdaemon/test_eventqueue.py' |
| 398 | --- tests/syncdaemon/test_eventqueue.py 2009-11-20 22:00:25 +0000 |
| 399 | +++ tests/syncdaemon/test_eventqueue.py 2010-01-26 20:35:29 +0000 |
| 400 | @@ -39,9 +39,11 @@ |
| 401 | self.fsmdir = self.mktemp('fsmdir') |
| 402 | self.partials_dir = self.mktemp('partials_dir') |
| 403 | self.root_dir = self.mktemp('root_dir') |
| 404 | + self.home_dir = self.mktemp('home_dir') |
| 405 | + self.vm = testcase.FakeVolumeManager(self.root_dir) |
| 406 | self.fs = filesystem_manager.FileSystemManager(self.fsmdir, |
| 407 | self.partials_dir, |
| 408 | - testcase.FakeVolumeManager(self.root_dir)) |
| 409 | + self.vm) |
| 410 | self.fs.create(path=self.root_dir, |
| 411 | share_id='', is_dir=True) |
| 412 | self.fs.set_by_path(path=self.root_dir, |
| 413 | |
| 414 | === modified file 'tests/syncdaemon/test_tools.py' |
| 415 | --- tests/syncdaemon/test_tools.py 2010-01-26 19:11:29 +0000 |
| 416 | +++ tests/syncdaemon/test_tools.py 2010-01-26 20:35:29 +0000 |
| 417 | @@ -182,7 +182,7 @@ |
| 418 | def check(result): |
| 419 | """do the asserts""" |
| 420 | self.assertEquals('Yes', result['answer']) |
| 421 | - self.assertEquals('share_id', result['share_id']) |
| 422 | + self.assertEquals('share_id', result['volume_id']) |
| 423 | self.assertEquals(True, self.main.vm.shares['share_id'].accepted) |
| 424 | |
| 425 | d.addCallback(check) |
| 426 | @@ -199,7 +199,7 @@ |
| 427 | def check(result): |
| 428 | """do the asserts""" |
| 429 | self.assertEquals('No', result['answer']) |
| 430 | - self.assertEquals('share_id', result['share_id']) |
| 431 | + self.assertEquals('share_id', result['volume_id']) |
| 432 | self.assertEquals(False, self.main.vm.shares['share_id'].accepted) |
| 433 | |
| 434 | d.addCallback(check) |
| 435 | |
| 436 | === modified file 'tests/syncdaemon/test_vm.py' |
| 437 | --- tests/syncdaemon/test_vm.py 2010-01-26 19:40:05 +0000 |
| 438 | +++ tests/syncdaemon/test_vm.py 2010-01-26 20:35:29 +0000 |
| 439 | @@ -21,7 +21,6 @@ |
| 440 | import logging |
| 441 | import os |
| 442 | import uuid |
| 443 | -import sys |
| 444 | |
| 445 | from ubuntuone.storageprotocol.client import ListShares, ListVolumes |
| 446 | from ubuntuone.storageprotocol.sharersp import ( |
| 447 | @@ -36,10 +35,16 @@ |
| 448 | ) |
| 449 | from ubuntuone.syncdaemon.volume_manager import ( |
| 450 | Share, |
| 451 | + Shared, |
| 452 | + UDF, |
| 453 | + Root, |
| 454 | + _Share, |
| 455 | + _UDF, |
| 456 | allow_writes, |
| 457 | - UDF, |
| 458 | VolumeManager, |
| 459 | LegacyShareFileShelf, |
| 460 | + MetadataUpgrader, |
| 461 | + VMFileShelf, |
| 462 | ) |
| 463 | from twisted.internet import defer, reactor |
| 464 | |
| 465 | @@ -1373,64 +1378,118 @@ |
| 466 | self.assertTrue(isinstance(share.node_id, basestring)) |
| 467 | |
| 468 | |
| 469 | -class ShareShelfUpgradeTests(BaseTwistedTestCase): |
| 470 | - """ Tests for shares shelf upgrades""" |
| 471 | +class MetadataTestCase(BaseTwistedTestCase): |
| 472 | + md_version_None = False |
| 473 | + main = None |
| 474 | + data_dir = None |
| 475 | + share_md_dir = None |
| 476 | + shared_md_dir = None |
| 477 | + partials_dir = None |
| 478 | + u1_dir = None |
| 479 | + root_dir = None |
| 480 | + shares_dir = None |
| 481 | + shares_dir_link = None |
| 482 | |
| 483 | def setUp(self): |
| 484 | - """ setup the test """ |
| 485 | + """Create some directories.""" |
| 486 | BaseTwistedTestCase.setUp(self) |
| 487 | - self.root_dir = self.mktemp('Ubuntu One') |
| 488 | - self.data_dir = self.mktemp('data_dir') |
| 489 | - self.partials_dir = self.mktemp('partials_dir') |
| 490 | - self.shares_dir = self.mktemp(os.path.join('Ubuntu One', |
| 491 | - 'Shared with Me')) |
| 492 | + self.data_dir = os.path.join(self.tmpdir, 'data_dir') |
| 493 | + self.vm_data_dir = os.path.join(self.tmpdir, 'data_dir', 'vm') |
| 494 | + self.partials_dir = self.mktemp('partials') |
| 495 | + self.u1_dir = os.path.join(self.tmpdir, 'Ubuntu One') |
| 496 | + self.version_file = os.path.join(self.vm_data_dir, '.version') |
| 497 | |
| 498 | def tearDown(self): |
| 499 | - """Cleanup main and remove the temp dir.""" |
| 500 | - main = getattr(self, 'main', None) |
| 501 | - if main: |
| 502 | - main.shutdown() |
| 503 | - if os.path.exists(self.root_dir): |
| 504 | - self.rmtree(self.root_dir) |
| 505 | - if os.path.exists(self.data_dir): |
| 506 | - self.rmtree(self.data_dir) |
| 507 | - if os.path.exists(self.shares_dir): |
| 508 | - self.rmtree(self.shares_dir) |
| 509 | + """Cleanup all the cruft.""" |
| 510 | + for path in [self.data_dir, self.partials_dir, self.root_dir, |
| 511 | + self.shares_dir]: |
| 512 | + if path and os.path.exists(path): |
| 513 | + self.rmtree(path) |
| 514 | + if self.main: |
| 515 | + self.main.shutdown() |
| 516 | VolumeManager.METADATA_VERSION = CURRENT_METADATA_VERSION |
| 517 | - return BaseTwistedTestCase.tearDown(self) |
| 518 | + BaseTwistedTestCase.tearDown(self) |
| 519 | |
| 520 | def check_version(self): |
| 521 | - """ check if the current version in the .version file is the lastone. |
| 522 | - """ |
| 523 | - with open(os.path.join(self.data_dir, 'vm', '.version'), 'r') as fd: |
| 524 | + """Check if the current version in the .version file is the last one.""" |
| 525 | + with open(self.version_file, 'r') as fd: |
| 526 | self.assertEquals(CURRENT_METADATA_VERSION, fd.read().strip()) |
| 527 | |
| 528 | - def test_0_to_1(self): |
| 529 | - """ Test the upgrade from the first shelf layout version to v. 1""" |
| 530 | - # ensure a clean data_dir |
| 531 | - self.rmtree(self.data_dir) |
| 532 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 533 | - old_shelf = LegacyShareFileShelf(vm_data_dir) |
| 534 | + def set_md_version(self, md_version): |
| 535 | + """Write md_version to the .version file.""" |
| 536 | + if not os.path.exists(self.vm_data_dir): |
| 537 | + os.makedirs(self.vm_data_dir) |
| 538 | + with open(self.version_file, 'w') as fd: |
| 539 | + fd.write(md_version) |
| 540 | + |
| 541 | + |
| 542 | +class MetadataOldLayoutTests(MetadataTestCase): |
| 543 | + """Tests for 'old' layouts and metadata upgrade""" |
| 544 | + |
| 545 | + def setUp(self): |
| 546 | + MetadataTestCase.setUp(self) |
| 547 | + self.root_dir = os.path.join(self.u1_dir, 'My Files') |
| 548 | + self.shares_dir = os.path.join(self.u1_dir, 'Shared With Me') |
| 549 | + self.new_root_dir = self.u1_dir |
| 550 | + self.new_shares_dir = self.mktemp('shares_dir') |
| 551 | + |
| 552 | + def tearDown(self): |
| 553 | + """Cleanup all the cruft.""" |
| 554 | + for path in [self.u1_dir, self.new_shares_dir]: |
| 555 | + if path and os.path.exists(path): |
| 556 | + self.rmtree(path) |
| 557 | + MetadataTestCase.tearDown(self) |
| 558 | + |
| 559 | + def _build_layout_version_0(self): |
| 560 | + """Build the dir structure to mimic md v.0/None.""" |
| 561 | + self.share_md_dir = os.path.join(self.tmpdir, 'data_dir', 'vm') |
| 562 | + os.makedirs(self.share_md_dir) |
| 563 | + os.makedirs(self.root_dir) |
| 564 | + os.makedirs(self.shares_dir) |
| 565 | + |
| 566 | + def _build_layout_version_1(self): |
| 567 | + """Build the dir structure to mimic md v.1""" |
| 568 | + self.share_md_dir = os.path.join(self.vm_data_dir, 'shares') |
| 569 | + self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared') |
| 570 | + os.makedirs(self.share_md_dir) |
| 571 | + os.makedirs(self.shared_md_dir) |
| 572 | + os.makedirs(self.root_dir) |
| 573 | + os.makedirs(self.shares_dir) |
| 574 | + |
| 575 | + def _set_permissions(self): |
| 576 | + """Set the RO perms in the root and the shares directory.""" |
| 577 | + os.chmod(self.shares_dir, 0500) |
| 578 | + os.chmod(self.u1_dir, 0500) |
| 579 | + |
| 580 | + def test_upgrade_0(self): |
| 581 | + """Test the upgrade from the first shelf layout version.""" |
| 582 | + self._build_layout_version_0() |
| 583 | + old_shelf = LegacyShareFileShelf(self.share_md_dir) |
| 584 | # add the root_uuid key |
| 585 | - root_share = Share(path=self.root_dir) |
| 586 | + root_share = _Share(path=self.root_dir) |
| 587 | root_share.access_level = 'Modify' |
| 588 | old_shelf[''] = root_share |
| 589 | for idx in range(1, 10): |
| 590 | - old_shelf[str(uuid.uuid4())] = \ |
| 591 | - Share(path=os.path.join(self.shares_dir, str(idx))) |
| 592 | - # LegacyShareFileShelf.keys returns a generator |
| 593 | + sid = str(uuid.uuid4()) |
| 594 | + old_shelf[sid] = _Share(path=os.path.join(self.shares_dir, str(idx)), |
| 595 | + share_id=sid) |
| 596 | + # ShareFileShelf.keys returns a generator |
| 597 | old_keys = [key for key in old_shelf.keys()] |
| 598 | self.assertEquals(10, len(old_keys)) |
| 599 | + if self.md_version_None: |
| 600 | + self.set_md_version('') |
| 601 | + # set the ro permissions |
| 602 | + self._set_permissions() |
| 603 | # we want to keep a refernece to main in order to shutdown |
| 604 | # pylint: disable-msg=W0201 |
| 605 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 606 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 607 | self.data_dir, self.partials_dir) |
| 608 | new_keys = [new_key for new_key in self.main.vm.shares.keys()] |
| 609 | self.assertEquals(10, len(new_keys)) |
| 610 | for new_key in new_keys: |
| 611 | self.assertIn(new_key, old_keys) |
| 612 | # check the old data is still there (in the backup) |
| 613 | - backup_shelf = LegacyShareFileShelf(os.path.join(vm_data_dir, '0.bkp')) |
| 614 | + backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp')) |
| 615 | backup_keys = [key for key in backup_shelf.keys()] |
| 616 | for old_key in old_keys: |
| 617 | self.assertIn(old_key, backup_keys) |
| 618 | @@ -1438,18 +1497,13 @@ |
| 619 | self.assertIn(new_key, backup_keys) |
| 620 | self.check_version() |
| 621 | |
| 622 | - def test_1_to_2(self): |
| 623 | - """ Test the upgrade from v.1 of the metadata to v.2""" |
| 624 | - # ensure a clean data_dir |
| 625 | - self.rmtree(self.data_dir) |
| 626 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 627 | - vm_shares_dir = os.path.join(vm_data_dir, 'shares') |
| 628 | - os.makedirs(vm_data_dir) |
| 629 | + def test_upgrade_1(self): |
| 630 | + """ Test the upgrade from v.1""" |
| 631 | + self._build_layout_version_1() |
| 632 | # write the .version file with v.1 |
| 633 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 634 | - fd.write('1') |
| 635 | + self.set_md_version('1') |
| 636 | |
| 637 | - share_file = os.path.join(vm_shares_dir, |
| 638 | + share_file = os.path.join(self.share_md_dir, |
| 639 | '0/6/6/0664f050-9254-45c5-9f31-3482858709e4') |
| 640 | os.makedirs(os.path.dirname(share_file)) |
| 641 | # this is the str of a version 2 pickle |
| 642 | @@ -1465,222 +1519,228 @@ |
| 643 | with open(share_file, 'w') as fd: |
| 644 | fd.write(share_value) |
| 645 | |
| 646 | - # fake the old namespace |
| 647 | - sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] = \ |
| 648 | - sys.modules['ubuntuone.syncdaemon.volume_manager'] |
| 649 | # try to load the shelf |
| 650 | - old_shelf = LegacyShareFileShelf(vm_shares_dir) |
| 651 | + old_shelf = LegacyShareFileShelf(self.share_md_dir) |
| 652 | share = old_shelf['0664f050-9254-45c5-9f31-3482858709e4'] |
| 653 | self.assertTrue(share is not None) |
| 654 | - del sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] |
| 655 | + if self.md_version_None: |
| 656 | + self.set_md_version('') |
| 657 | + |
| 658 | + self._set_permissions() |
| 659 | # now use the real VolumeManager |
| 660 | # we want to keep a refernece to main in order to shutdown |
| 661 | # pylint: disable-msg=W0201 |
| 662 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 663 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 664 | self.data_dir, self.partials_dir) |
| 665 | new_keys = [new_key for new_key in self.main.vm.shares.keys()] |
| 666 | self.assertEquals(2, len(new_keys)) # the fake share plus root |
| 667 | - for key in ['', share.volume_id]: |
| 668 | + for key in ['', share.id]: |
| 669 | self.assertIn(key, new_keys) |
| 670 | self.check_version() |
| 671 | |
| 672 | - def test_2_to_3(self): |
| 673 | - """ Test the upgrade from v.2 of the metadata to v.3""" |
| 674 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 675 | - os.makedirs(vm_data_dir) |
| 676 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 677 | - fd.write('2') |
| 678 | + def test_upgrade_2(self): |
| 679 | + """Test the upgrade from v.2.""" |
| 680 | + self._build_layout_version_1() |
| 681 | + self.set_md_version('2') |
| 682 | open(self.root_dir + '/foo.conflict', 'w').close() |
| 683 | open(self.root_dir + '/foo.conflict.23', 'w').close() |
| 684 | open(self.shares_dir + '/bar.partial', 'w').close() |
| 685 | os.mkdir(self.shares_dir + '/baz/') |
| 686 | open(self.shares_dir + '/baz/baz.conflict', 'w').close() |
| 687 | os.chmod(self.shares_dir + '/baz/', 0500) |
| 688 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 689 | + if self.md_version_None: |
| 690 | + self.set_md_version('') |
| 691 | + self._set_permissions() |
| 692 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 693 | self.data_dir, self.partials_dir) |
| 694 | - self.assertTrue(os.path.exists(self.root_dir + '/foo.u1conflict')) |
| 695 | - self.assertTrue(os.path.exists(self.root_dir + '/foo.u1conflict.23')) |
| 696 | - self.assertTrue(os.path.exists(self.shares_dir + '/.u1partial.bar')) |
| 697 | - self.assertTrue(os.path.exists(self.shares_dir + '/baz/baz.u1conflict')) |
| 698 | + self.assertTrue(os.path.exists(self.new_root_dir + '/foo.u1conflict')) |
| 699 | + self.assertTrue(os.path.exists(self.new_root_dir + '/foo.u1conflict.23')) |
| 700 | + self.assertTrue(os.path.exists(self.new_shares_dir + '/.u1partial.bar')) |
| 701 | + self.assertTrue(os.path.exists(self.new_shares_dir + '/baz/baz.u1conflict')) |
| 702 | + self.check_version() |
| 703 | |
| 704 | - def test_2_to_3_more(self): |
| 705 | - """ Test the upgrade from v.2 of the metadata to v.3 some more""" |
| 706 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 707 | - os.makedirs(vm_data_dir) |
| 708 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 709 | - fd.write('2') |
| 710 | + def test_upgrade_2_more(self): |
| 711 | + """Test the upgrade from v.2 some more.""" |
| 712 | + self._build_layout_version_1() |
| 713 | + self.set_md_version('2') |
| 714 | |
| 715 | expected = [] |
| 716 | |
| 717 | - for dirname in self.root_dir, self.shares_dir: |
| 718 | + for dirname, new_dirname in [(self.root_dir, self.new_root_dir), |
| 719 | + (self.shares_dir, self.new_shares_dir)]: |
| 720 | # a plain .conflict... |
| 721 | # ...on a file |
| 722 | open(dirname + '/1a.conflict', 'w').close() |
| 723 | - expected.append(dirname + '/1a.u1conflict') |
| 724 | + expected.append(new_dirname + '/1a.u1conflict') |
| 725 | # ...on an empty directory |
| 726 | os.mkdir(dirname + '/1b.conflict') |
| 727 | - expected.append(dirname + '/1b.u1conflict') |
| 728 | + expected.append(new_dirname + '/1b.u1conflict') |
| 729 | # ...on a directory with content |
| 730 | os.mkdir(dirname + '/1c.conflict') |
| 731 | os.mkdir(dirname + '/1c.conflict/1c') |
| 732 | - expected.append(dirname + '/1c.u1conflict/1c') |
| 733 | + expected.append(new_dirname + '/1c.u1conflict/1c') |
| 734 | # ...in a readonly directory |
| 735 | os.mkdir(dirname + '/1d') |
| 736 | os.mkdir(dirname + '/1d/1d.conflict') |
| 737 | os.chmod(dirname + '/1d', 0500) |
| 738 | - expected.append(dirname + '/1d/1d.u1conflict') |
| 739 | + expected.append(new_dirname + '/1d/1d.u1conflict') |
| 740 | # ...in a directory that is also a .conflict |
| 741 | os.mkdir(dirname + '/1e.conflict') |
| 742 | os.mkdir(dirname + '/1e.conflict/1e.conflict') |
| 743 | - expected.append(dirname + '/1e.u1conflict/1e.u1conflict') |
| 744 | + expected.append(new_dirname + '/1e.u1conflict/1e.u1conflict') |
| 745 | |
| 746 | # a numbered .conflict... |
| 747 | # ...on a file |
| 748 | open(dirname + '/2a.conflict.2', 'w').close() |
| 749 | - expected.append(dirname + '/2a.u1conflict.2') |
| 750 | + expected.append(new_dirname + '/2a.u1conflict.2') |
| 751 | # ...on an empty directory |
| 752 | os.mkdir(dirname + '/2b.conflict.3') |
| 753 | - expected.append(dirname + '/2b.u1conflict.3') |
| 754 | + expected.append(new_dirname + '/2b.u1conflict.3') |
| 755 | # ...on a directory with content |
| 756 | os.mkdir(dirname + '/2c.conflict.4') |
| 757 | os.mkdir(dirname + '/2c.conflict.4/2c') |
| 758 | - expected.append(dirname + '/2c.u1conflict.4/2c') |
| 759 | + expected.append(new_dirname + '/2c.u1conflict.4/2c') |
| 760 | # ...in a readonly directory |
| 761 | os.mkdir(dirname + '/2d') |
| 762 | os.mkdir(dirname + '/2d/2d.conflict.5') |
| 763 | os.chmod(dirname + '/2d', 0500) |
| 764 | - expected.append(dirname + '/2d/2d.u1conflict.5') |
| 765 | + expected.append(new_dirname + '/2d/2d.u1conflict.5') |
| 766 | # ...in a directory that is also a .conflict |
| 767 | os.mkdir(dirname + '/2e.conflict') |
| 768 | os.mkdir(dirname + '/2e.conflict/2e.conflict.6') |
| 769 | - expected.append(dirname + '/2e.u1conflict/2e.u1conflict.6') |
| 770 | + expected.append(new_dirname + '/2e.u1conflict/2e.u1conflict.6') |
| 771 | |
| 772 | # a plain .conflict of which there already exists a .u1conflict... |
| 773 | # ...on a file |
| 774 | open(dirname + '/3a.conflict', 'w').close() |
| 775 | open(dirname + '/3a.u1conflict', 'w').close() |
| 776 | - expected.append(dirname + '/3a.u1conflict') |
| 777 | - expected.append(dirname + '/3a.u1conflict.1') |
| 778 | + expected.append(new_dirname + '/3a.u1conflict') |
| 779 | + expected.append(new_dirname + '/3a.u1conflict.1') |
| 780 | # ...on an empty directory |
| 781 | os.mkdir(dirname + '/3b.conflict') |
| 782 | os.mkdir(dirname + '/3b.u1conflict') |
| 783 | - expected.append(dirname + '/3b.u1conflict') |
| 784 | - expected.append(dirname + '/3b.u1conflict.1') |
| 785 | + expected.append(new_dirname + '/3b.u1conflict') |
| 786 | + expected.append(new_dirname + '/3b.u1conflict.1') |
| 787 | # ...on a directory with content |
| 788 | os.mkdir(dirname + '/3c.conflict') |
| 789 | os.mkdir(dirname + '/3c.conflict/3c') |
| 790 | os.mkdir(dirname + '/3c.u1conflict') |
| 791 | os.mkdir(dirname + '/3c.u1conflict/3c2') |
| 792 | - expected.append(dirname + '/3c.u1conflict.1/3c') |
| 793 | - expected.append(dirname + '/3c.u1conflict/3c2') |
| 794 | + expected.append(new_dirname + '/3c.u1conflict.1/3c') |
| 795 | + expected.append(new_dirname + '/3c.u1conflict/3c2') |
| 796 | # ...in a readonly directory |
| 797 | os.mkdir(dirname + '/3d') |
| 798 | os.mkdir(dirname + '/3d/3d.conflict') |
| 799 | os.mkdir(dirname + '/3d/3d.u1conflict') |
| 800 | os.mkdir(dirname + '/3d/3d.u1conflict/3d') |
| 801 | os.chmod(dirname + '/3d', 0500) |
| 802 | - expected.append(dirname + '/3d/3d.u1conflict/3d') |
| 803 | - expected.append(dirname + '/3d/3d.u1conflict.1') |
| 804 | + expected.append(new_dirname + '/3d/3d.u1conflict/3d') |
| 805 | + expected.append(new_dirname + '/3d/3d.u1conflict.1') |
| 806 | # ...in a directory that is also a .conflict |
| 807 | os.mkdir(dirname + '/3e.conflict') |
| 808 | os.mkdir(dirname + '/3e.conflict/3e.conflict') |
| 809 | os.mkdir(dirname + '/3e.conflict/3e.u1conflict') |
| 810 | os.mkdir(dirname + '/3e.conflict/3e.u1conflict/3e') |
| 811 | - expected.append(dirname + '/3e.u1conflict/3e.u1conflict/3e') |
| 812 | - expected.append(dirname + '/3e.u1conflict/3e.u1conflict.1') |
| 813 | + expected.append(new_dirname + '/3e.u1conflict/3e.u1conflict/3e') |
| 814 | + expected.append(new_dirname + '/3e.u1conflict/3e.u1conflict.1') |
| 815 | |
| 816 | # a numbered .conflict of which there already exists a .u1conflict... |
| 817 | # ...on a file |
| 818 | open(dirname + '/4a.conflict.1', 'w').close() |
| 819 | open(dirname + '/4a.u1conflict.1', 'w').close() |
| 820 | - expected.append(dirname + '/4a.u1conflict.1') |
| 821 | - expected.append(dirname + '/4a.u1conflict.2') |
| 822 | + expected.append(new_dirname + '/4a.u1conflict.1') |
| 823 | + expected.append(new_dirname + '/4a.u1conflict.2') |
| 824 | # ...on an empty directory |
| 825 | os.mkdir(dirname + '/4b.conflict.2') |
| 826 | os.mkdir(dirname + '/4b.u1conflict.2') |
| 827 | - expected.append(dirname + '/4b.u1conflict.2') |
| 828 | - expected.append(dirname + '/4b.u1conflict.3') |
| 829 | + expected.append(new_dirname + '/4b.u1conflict.2') |
| 830 | + expected.append(new_dirname + '/4b.u1conflict.3') |
| 831 | # ...on a directory with content |
| 832 | os.mkdir(dirname + '/4c.conflict.3') |
| 833 | os.mkdir(dirname + '/4c.conflict.3/4c') |
| 834 | os.mkdir(dirname + '/4c.u1conflict.3') |
| 835 | - expected.append(dirname + '/4c.u1conflict.4/4c') |
| 836 | - expected.append(dirname + '/4c.u1conflict.3') |
| 837 | + expected.append(new_dirname + '/4c.u1conflict.4/4c') |
| 838 | + expected.append(new_dirname + '/4c.u1conflict.3') |
| 839 | # ...in a readonly directory |
| 840 | os.mkdir(dirname + '/4d') |
| 841 | os.mkdir(dirname + '/4d/4d.conflict.4') |
| 842 | os.mkdir(dirname + '/4d/4d.u1conflict.4') |
| 843 | os.chmod(dirname + '/4d', 0500) |
| 844 | - expected.append(dirname + '/4d/4d.u1conflict.4') |
| 845 | - expected.append(dirname + '/4d/4d.u1conflict.5') |
| 846 | + expected.append(new_dirname + '/4d/4d.u1conflict.4') |
| 847 | + expected.append(new_dirname + '/4d/4d.u1conflict.5') |
| 848 | # ...in a directory that is also a .conflict |
| 849 | os.mkdir(dirname + '/4e.conflict') |
| 850 | os.mkdir(dirname + '/4e.conflict/4e.conflict.5') |
| 851 | os.mkdir(dirname + '/4e.conflict/4e.u1conflict.5') |
| 852 | - expected.append(dirname + '/4e.u1conflict/4e.u1conflict.5') |
| 853 | - expected.append(dirname + '/4e.u1conflict/4e.u1conflict.6') |
| 854 | + expected.append(new_dirname + '/4e.u1conflict/4e.u1conflict.5') |
| 855 | + expected.append(new_dirname + '/4e.u1conflict/4e.u1conflict.6') |
| 856 | |
| 857 | # a plain .partial... |
| 858 | # ...of a file |
| 859 | open(dirname + '/5a.partial', 'w').close() |
| 860 | - expected.append(dirname + '/.u1partial.5a') |
| 861 | + expected.append(new_dirname + '/.u1partial.5a') |
| 862 | # ...of a directory |
| 863 | os.mkdir(dirname + '/5b') |
| 864 | open(dirname + '/5b/.partial', 'w').close() |
| 865 | - expected.append(dirname + '/5b/.u1partial') |
| 866 | + expected.append(new_dirname + '/5b/.u1partial') |
| 867 | # ...of a readonly directory |
| 868 | os.mkdir(dirname + '/5c') |
| 869 | open(dirname + '/5c/.partial', 'w').close() |
| 870 | os.chmod(dirname + '/5c', 0500) |
| 871 | - expected.append(dirname + '/5c/.u1partial') |
| 872 | + expected.append(new_dirname + '/5c/.u1partial') |
| 873 | |
| 874 | # a plain .partial of which there already exists a .u1partial... |
| 875 | # ...of a file |
| 876 | open(dirname + '/6a.partial', 'w').close() |
| 877 | open(dirname + '/.u1partial.6a', 'w').close() |
| 878 | - expected.append(dirname + '/.u1partial.6a') |
| 879 | - expected.append(dirname + '/.u1partial.6a.1') |
| 880 | + expected.append(new_dirname + '/.u1partial.6a') |
| 881 | + expected.append(new_dirname + '/.u1partial.6a.1') |
| 882 | # ...of a directory |
| 883 | os.mkdir(dirname + '/6b') |
| 884 | open(dirname + '/6b/.partial', 'w').close() |
| 885 | open(dirname + '/6b/.u1partial', 'w').close() |
| 886 | - expected.append(dirname + '/6b/.u1partial') |
| 887 | - expected.append(dirname + '/6b/.u1partial.1') |
| 888 | + expected.append(new_dirname + '/6b/.u1partial') |
| 889 | + expected.append(new_dirname + '/6b/.u1partial.1') |
| 890 | # ...of a readonly directory |
| 891 | os.mkdir(dirname + '/6c') |
| 892 | open(dirname + '/6c/.partial', 'w').close() |
| 893 | open(dirname + '/6c/.u1partial', 'w').close() |
| 894 | os.chmod(dirname + '/6c', 0500) |
| 895 | - expected.append(dirname + '/6c/.u1partial') |
| 896 | - expected.append(dirname + '/6c/.u1partial.1') |
| 897 | + expected.append(new_dirname + '/6c/.u1partial') |
| 898 | + expected.append(new_dirname + '/6c/.u1partial.1') |
| 899 | |
| 900 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 901 | + self._set_permissions() |
| 902 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 903 | self.data_dir, self.partials_dir) |
| 904 | |
| 905 | for path in expected: |
| 906 | self.assertTrue(os.path.exists(path), 'missing ' + path) |
| 907 | + self.check_version() |
| 908 | |
| 909 | def test_missing_version_file_with_version_non_0(self): |
| 910 | - """ Test the upgrade from the first shelf layout version to v3 |
| 911 | - while the metadata sould be in v3 format |
| 912 | + """Test the upgrade from the first shelf layout version |
| 913 | + while the metadata sould be in v3 or greater format. |
| 914 | + |
| 915 | """ |
| 916 | - # ensure a clean data_dir |
| 917 | - self.rmtree(self.data_dir) |
| 918 | - vm_data_dir = os.path.join(self.data_dir, 'vm', 'shares') |
| 919 | - maybe_old_shelf = LegacyShareFileShelf(vm_data_dir) |
| 920 | + self._build_layout_version_1() |
| 921 | + maybe_old_shelf = LegacyShareFileShelf(self.share_md_dir) |
| 922 | # add the root_uuid key |
| 923 | - root_share = Share(path=self.root_dir) |
| 924 | + root_share = _Share(self.root_dir) |
| 925 | root_share.access_level = 'Modify' |
| 926 | maybe_old_shelf[''] = root_share |
| 927 | for idx in range(1, 10): |
| 928 | - maybe_old_shelf[str(uuid.uuid4())] = \ |
| 929 | - Share(path=os.path.join(self.shares_dir, str(idx))) |
| 930 | + share_id = str(uuid.uuid4()) |
| 931 | + maybe_old_shelf[share_id] = \ |
| 932 | + _Share(share_id=share_id, |
| 933 | + path=os.path.join(self.shares_dir, str(idx))) |
| 934 | # ShareFileShelf.keys returns a generator |
| 935 | maybe_old_keys = [key for key in maybe_old_shelf.keys()] |
| 936 | self.assertEquals(10, len(maybe_old_keys)) |
| 937 | + if self.md_version_None: |
| 938 | + self.set_md_version('') |
| 939 | # we want to keep a refernece to main in order to shutdown |
| 940 | # pylint: disable-msg=W0201 |
| 941 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 942 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 943 | self.data_dir, self.partials_dir) |
| 944 | new_keys = [new_key for new_key in self.main.vm.shares.keys()] |
| 945 | self.assertEquals(10, len(new_keys)) |
| 946 | @@ -1689,160 +1749,444 @@ |
| 947 | # as we didn't actually upgrade the shelf, just the .version file |
| 948 | # check the empty 0.bkp |
| 949 | # check the old data is still there (in the backup) |
| 950 | - backup_shelf = LegacyShareFileShelf(os.path.join(vm_data_dir, '0.bkp')) |
| 951 | + backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp')) |
| 952 | backup_keys = [key for key in backup_shelf.keys()] |
| 953 | self.assertEquals(0, len(backup_keys)) |
| 954 | self.check_version() |
| 955 | |
| 956 | - def test_3_to_4(self): |
| 957 | - """upgrade from version 3 to 4""" |
| 958 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 959 | - os.makedirs(vm_data_dir) |
| 960 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 961 | - fd.write('3') |
| 962 | - os.rmdir(self.shares_dir) |
| 963 | - # build the old layout |
| 964 | - old_root = os.path.join(self.root_dir, 'My Files') |
| 965 | - old_shares = os.path.join(self.root_dir, 'Shared With Me') |
| 966 | - os.makedirs(os.path.join(old_root, 'test_dir')) |
| 967 | - open(os.path.join(old_root, 'test_file'), 'w').close() |
| 968 | + def test_upgrade_3(self): |
| 969 | + """Test upgrade from version 3.""" |
| 970 | + self._build_layout_version_1() |
| 971 | + self.set_md_version('3') |
| 972 | + # create a dir in the root |
| 973 | + os.makedirs(os.path.join(self.root_dir, 'test_dir')) |
| 974 | # create a file in the root |
| 975 | open(os.path.join(self.root_dir, 'test_file'), 'w').close() |
| 976 | - share_path = os.path.join(old_shares, 'Bla from Foo') |
| 977 | + # create a file in the new root |
| 978 | + open(os.path.join(self.new_root_dir, 'test_file'), 'w').close() |
| 979 | + share_path = os.path.join(self.shares_dir, 'Bla from Foo') |
| 980 | os.makedirs(share_path) |
| 981 | os.makedirs(os.path.join(share_path, 'test_dir')) |
| 982 | open(os.path.join(share_path, 'test_file'), 'w').close() |
| 983 | # fix permissions |
| 984 | - os.chmod(self.root_dir, 0555) |
| 985 | - os.chmod(old_shares, 0555) |
| 986 | + self._set_permissions() |
| 987 | + if self.md_version_None: |
| 988 | + self.set_md_version('') |
| 989 | # migrate the data |
| 990 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 991 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 992 | self.data_dir, self.partials_dir) |
| 993 | - self.assertFalse(os.path.exists(old_root)) |
| 994 | - self.assertTrue(os.path.exists(old_shares)) |
| 995 | - self.assertTrue(os.path.islink(old_shares)) |
| 996 | - self.assertEquals(old_shares, self.main.shares_dir_link) |
| 997 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 998 | + self.assertFalse(os.path.exists(self.root_dir)) |
| 999 | + self.assertTrue(os.path.exists(self.shares_dir)) |
| 1000 | + self.assertTrue(os.path.islink(self.shares_dir), self.shares_dir) |
| 1001 | + self.assertEquals(self.shares_dir, self.main.shares_dir_link) |
| 1002 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1003 | 'test_dir'))) |
| 1004 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 1005 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1006 | 'test_file'))) |
| 1007 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 1008 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1009 | 'test_file.u1conflict'))) |
| 1010 | self.assertTrue(os.path.exists(share_path)) |
| 1011 | self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir'))) |
| 1012 | self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file'))) |
| 1013 | + self.check_version() |
| 1014 | |
| 1015 | - def test_3_to_4_with_symlink_in_myfiles(self): |
| 1016 | - """upgrade from version 3 to 4""" |
| 1017 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 1018 | - os.makedirs(vm_data_dir) |
| 1019 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 1020 | - fd.write('3') |
| 1021 | - os.rmdir(self.shares_dir) |
| 1022 | + def test_upgrade_3_with_symlink_in_myfiles(self): |
| 1023 | + """Test upgrade from version 3 with symlink in 'My Files'.""" |
| 1024 | + self._build_layout_version_1() |
| 1025 | + self.set_md_version('3') |
| 1026 | # build the old layout |
| 1027 | - old_root = os.path.join(self.root_dir, 'My Files') |
| 1028 | - old_shares = os.path.join(self.root_dir, 'Shared With Me') |
| 1029 | - os.makedirs(os.path.join(old_root, 'test_dir')) |
| 1030 | - open(os.path.join(old_root, 'test_file'), 'w').close() |
| 1031 | + os.makedirs(os.path.join(self.root_dir, 'test_dir')) |
| 1032 | + open(os.path.join(self.root_dir, 'test_file'), 'w').close() |
| 1033 | # create a file in the root |
| 1034 | - open(os.path.join(self.root_dir, 'test_file'), 'w').close() |
| 1035 | - share_path = os.path.join(old_shares, 'Bla from Foo') |
| 1036 | + open(os.path.join(self.new_root_dir, 'test_file'), 'w').close() |
| 1037 | + share_path = os.path.join(self.shares_dir, 'Bla from Foo') |
| 1038 | os.makedirs(share_path) |
| 1039 | os.makedirs(os.path.join(share_path, 'test_dir')) |
| 1040 | open(os.path.join(share_path, 'test_file'), 'w').close() |
| 1041 | # create the Shared with Me symlink in My Files |
| 1042 | - os.symlink(old_shares, os.path.join(old_root, 'Shared With Me')) |
| 1043 | + os.symlink(self.shares_dir, os.path.join(self.root_dir, |
| 1044 | + "Shared With Me")) |
| 1045 | # fix permissions |
| 1046 | - os.chmod(self.root_dir, 0555) |
| 1047 | - os.chmod(old_shares, 0555) |
| 1048 | + self._set_permissions() |
| 1049 | + if self.md_version_None: |
| 1050 | + self.set_md_version('') |
| 1051 | # migrate the data |
| 1052 | - self.shares_dir = os.path.join(self.tmpdir, 'shares') |
| 1053 | - self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1054 | + self.main = FakeMain(self.new_root_dir, self.new_shares_dir, |
| 1055 | self.data_dir, self.partials_dir) |
| 1056 | - self.assertFalse(os.path.exists(old_root)) |
| 1057 | - self.assertTrue(os.path.exists(old_shares)) |
| 1058 | - self.assertTrue(os.path.islink(old_shares)) |
| 1059 | - self.assertEquals(old_shares, self.main.shares_dir_link) |
| 1060 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 1061 | + self.assertFalse(os.path.exists(self.root_dir)) |
| 1062 | + self.assertTrue(os.path.exists(self.shares_dir)) |
| 1063 | + self.assertTrue(os.path.islink(self.shares_dir)) |
| 1064 | + self.assertEquals(self.shares_dir, self.main.shares_dir_link) |
| 1065 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1066 | 'test_dir'))) |
| 1067 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 1068 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1069 | 'test_file'))) |
| 1070 | - self.assertTrue(os.path.exists(os.path.join(self.root_dir, |
| 1071 | + self.assertTrue(os.path.exists(os.path.join(self.new_root_dir, |
| 1072 | 'test_file.u1conflict'))) |
| 1073 | self.assertTrue(os.path.exists(share_path)) |
| 1074 | self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir'))) |
| 1075 | self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file'))) |
| 1076 | self.assertEquals(self.main.shares_dir, |
| 1077 | os.readlink(self.main.shares_dir_link)) |
| 1078 | - |
| 1079 | - def test_None_to_4(self): |
| 1080 | - """upgrade from version None to 4 (possibly a clean start)""" |
| 1081 | - VolumeManager.METADATA_VERSION = '4' |
| 1082 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 1083 | - version_file = os.path.join(vm_data_dir, '.version') |
| 1084 | - if os.path.exists(version_file): |
| 1085 | - os.remove(version_file) |
| 1086 | - os.rmdir(self.shares_dir) |
| 1087 | - os.rmdir(self.root_dir) |
| 1088 | + self.check_version() |
| 1089 | + |
| 1090 | + |
| 1091 | +class MetadataNewLayoutTests(MetadataTestCase): |
| 1092 | + """Test for 'new' layout and metadata upgrade.""" |
| 1093 | + |
| 1094 | + def setUp(self): |
| 1095 | + MetadataTestCase.setUp(self) |
| 1096 | + self.share_md_dir = os.path.join(self.vm_data_dir, 'shares') |
| 1097 | + self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared') |
| 1098 | + self.home_dir = os.path.join(self.tmpdir, 'home', 'ubuntuonehacker') |
| 1099 | + self.u1_dir = os.path.join(self.home_dir, os.path.split(self.u1_dir)[1]) |
| 1100 | + self.root_dir = self.u1_dir |
| 1101 | + self.shares_dir = os.path.join(self.tmpdir, 'shares') |
| 1102 | + self.shares_dir_link = os.path.join(self.u1_dir, 'Shared With Me') |
| 1103 | + |
| 1104 | + def _build_layout_version_4(self): |
| 1105 | + """Build the directory structure to mimic md v.4/5.""" |
| 1106 | + os.makedirs(self.share_md_dir) |
| 1107 | + os.makedirs(self.shared_md_dir) |
| 1108 | + os.makedirs(self.root_dir) |
| 1109 | + os.makedirs(self.shares_dir) |
| 1110 | + os.symlink(self.shares_dir, self.shares_dir_link) |
| 1111 | + |
| 1112 | + def _fix_permissions(self): |
| 1113 | + """Fix shares dir permissions, making it read-only.""" |
| 1114 | + os.chmod(self.shares_dir, 0500) |
| 1115 | + |
| 1116 | + def test_upgrade_None_to_last(self): |
| 1117 | + """Upgrade from version 'None' (possibly a clean start).""" |
| 1118 | old_root = os.path.join(self.root_dir, 'My Files') |
| 1119 | old_shares = os.path.join(self.root_dir, 'Shared With Me') |
| 1120 | # start and check that everything is ok |
| 1121 | self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1122 | self.data_dir, self.partials_dir) |
| 1123 | self.assertFalse(os.path.exists(old_root)) |
| 1124 | + self.assertTrue(os.path.exists(self.root_dir)) |
| 1125 | self.assertTrue(os.path.exists(old_shares)) |
| 1126 | self.assertTrue(os.path.islink(old_shares)) |
| 1127 | self.assertEquals(old_shares, self.main.shares_dir_link) |
| 1128 | - if os.path.exists(version_file): |
| 1129 | - with open(os.path.join(vm_data_dir, '.version'), 'r') as fd: |
| 1130 | - self.assertEquals('4', fd.read()) |
| 1131 | - else: |
| 1132 | - self.fail('missing .version file') |
| 1133 | - |
| 1134 | - def test_None_to_4_phantom_share_path(self): |
| 1135 | - """upgrade from version None to 4 (possibly a clean start)""" |
| 1136 | - VolumeManager.METADATA_VERSION = '4' |
| 1137 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 1138 | - version_file = os.path.join(vm_data_dir, '.version') |
| 1139 | - if os.path.exists(version_file): |
| 1140 | - os.remove(version_file) |
| 1141 | - os.rmdir(self.shares_dir) |
| 1142 | - os.rmdir(self.root_dir) |
| 1143 | + self.check_version() |
| 1144 | + |
| 1145 | + def test_upgrade_None_to_last_phantom_share_path(self): |
| 1146 | + """Upgrade from version 'None' (possibly a clean start) with a root |
| 1147 | + with missing path. |
| 1148 | + |
| 1149 | + """ |
| 1150 | old_root = os.path.join(self.root_dir, 'My Files') |
| 1151 | old_shares = os.path.join(self.root_dir, 'Shared With Me') |
| 1152 | - # start and check that everything is ok |
| 1153 | self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1154 | self.data_dir, self.partials_dir) |
| 1155 | - root = self.main.vm.shares[''] |
| 1156 | + self.main.shutdown() |
| 1157 | + self.rmtree(self.vm_data_dir) |
| 1158 | + os.makedirs(self.vm_data_dir) |
| 1159 | + self.set_md_version('') |
| 1160 | + shares = LegacyShareFileShelf(self.share_md_dir) |
| 1161 | + root_share = _Share(self.root_dir) |
| 1162 | + root_share.access_level = 'Modify' |
| 1163 | # set None to the share path |
| 1164 | - root.path = None |
| 1165 | - self.main.vm.shares['test'] = root |
| 1166 | - if os.path.exists(version_file): |
| 1167 | - os.remove(version_file) |
| 1168 | - self.main.shutdown() |
| 1169 | + root_share.path = None |
| 1170 | + shares[''] = root_share |
| 1171 | + |
| 1172 | + if self.md_version_None: |
| 1173 | + self.set_md_version('') |
| 1174 | # check that it's all OK |
| 1175 | self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1176 | self.data_dir, self.partials_dir) |
| 1177 | self.assertFalse(os.path.exists(old_root)) |
| 1178 | - self.assertTrue(os.path.exists(old_shares)) |
| 1179 | + self.assertTrue(os.path.exists(self.root_dir)) |
| 1180 | + self.assertTrue(os.path.exists(self.shares_dir)) |
| 1181 | self.assertTrue(os.path.islink(old_shares)) |
| 1182 | self.assertEquals(old_shares, self.main.shares_dir_link) |
| 1183 | - if os.path.exists(version_file): |
| 1184 | - with open(os.path.join(vm_data_dir, '.version'), 'r') as fd: |
| 1185 | - self.assertEquals('4', fd.read()) |
| 1186 | - else: |
| 1187 | - self.fail('missing .version file') |
| 1188 | - |
| 1189 | - def test_4_to_5(self): |
| 1190 | - """test migration from 4 to 5 (broken symlink in the root)""" |
| 1191 | - vm_data_dir = os.path.join(self.data_dir, 'vm') |
| 1192 | - os.makedirs(vm_data_dir) |
| 1193 | - with open(os.path.join(vm_data_dir, '.version'), 'w') as fd: |
| 1194 | - fd.write('4') |
| 1195 | - # build the new layout with a broken symlink |
| 1196 | - shares_link = os.path.join(self.root_dir, 'Shared With Me') |
| 1197 | - os.symlink(shares_link, shares_link) |
| 1198 | + self.check_version() |
| 1199 | + |
| 1200 | + def test_upgrade_4(self): |
| 1201 | + """Test migration from 4 to 5 (broken symlink in the root).""" |
| 1202 | + self._build_layout_version_4() |
| 1203 | + self.set_md_version('4') |
| 1204 | + # break the symlink |
| 1205 | + if os.path.exists(self.shares_dir_link): |
| 1206 | + os.unlink(self.shares_dir_link) |
| 1207 | + os.symlink(self.shares_dir_link, self.shares_dir_link) |
| 1208 | + |
| 1209 | + if self.md_version_None: |
| 1210 | + self.set_md_version('') |
| 1211 | self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1212 | self.data_dir, self.partials_dir) |
| 1213 | self.assertEquals(self.main.shares_dir, |
| 1214 | os.readlink(self.main.shares_dir_link)) |
| 1215 | + self.check_version() |
| 1216 | + |
| 1217 | + def test_upgrade_5(self): |
| 1218 | + """Test the migration from version 5.""" |
| 1219 | + # build a fake version 5 state |
| 1220 | + self._build_layout_version_4() |
| 1221 | + self.set_md_version('5') |
| 1222 | + # create some old shares and shared metadata |
| 1223 | + legacy_shares = LegacyShareFileShelf(self.share_md_dir) |
| 1224 | + root_share = _Share(path=self.root_dir, share_id='', |
| 1225 | + access_level='Modify') |
| 1226 | + legacy_shares[''] = root_share |
| 1227 | + for idx, name in enumerate(['share'] * 1000): |
| 1228 | + sid = str(uuid.uuid4()) |
| 1229 | + share_name = name + '_' + str(idx) |
| 1230 | + share = _Share(path=os.path.join(self.shares_dir, share_name), |
| 1231 | + share_id=sid, name=share_name, |
| 1232 | + node_id=str(uuid.uuid4()), |
| 1233 | + other_username='username'+str(idx), |
| 1234 | + other_visible_name='visible name ' + str(idx)) |
| 1235 | + if idx % 2: |
| 1236 | + share.access_level = 'Modify' |
| 1237 | + else: |
| 1238 | + share.access_level = 'View' |
| 1239 | + legacy_shares[sid] = share |
| 1240 | + |
| 1241 | + # create shared shares |
| 1242 | + legacy_shared = LegacyShareFileShelf(self.shared_md_dir) |
| 1243 | + for idx, name in enumerate(['dir'] * 5): |
| 1244 | + sid = str(uuid.uuid4()) |
| 1245 | + share_name = name + '_' + str(idx) |
| 1246 | + share = _Share(path=os.path.join(self.root_dir, share_name), |
| 1247 | + share_id=sid, node_id=str(uuid.uuid4()), |
| 1248 | + name=share_name, other_username='hola', |
| 1249 | + other_visible_name='hola') |
| 1250 | + if idx % 2: |
| 1251 | + share.access_level = 'Modify' |
| 1252 | + else: |
| 1253 | + share.access_level = 'View' |
| 1254 | + legacy_shared[sid] = share |
| 1255 | + |
| 1256 | + # keep a copy of the current shares and shared metadata to check |
| 1257 | + # the upgrade went ok |
| 1258 | + legacy_shares = dict(legacy_shares.items()) |
| 1259 | + legacy_shared = dict(legacy_shared.items()) |
| 1260 | + |
| 1261 | + if self.md_version_None: |
| 1262 | + self.set_md_version('') |
| 1263 | + # upgrade it! |
| 1264 | + self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1265 | + self.data_dir, self.partials_dir) |
| 1266 | + vm = self.main.vm |
| 1267 | + def compare_share(share, old_share): |
| 1268 | + """Compare two shares, new and old""" |
| 1269 | + self.assertEquals(share.volume_id, old_share.id) |
| 1270 | + self.assertEquals(share.path, old_share.path) |
| 1271 | + self.assertEquals(share.node_id, old_share.subtree) |
| 1272 | + if not isinstance(share, Root): |
| 1273 | + self.assertEquals(share.name, old_share.name) |
| 1274 | + self.assertEquals(share.other_username, old_share.other_username) |
| 1275 | + self.assertEquals(share.other_visible_name, old_share.other_visible_name) |
| 1276 | + self.assertEquals(share.access_level, old_share.access_level) |
| 1277 | + |
| 1278 | + for sid in vm.shares: |
| 1279 | + old_share = legacy_shares[sid] |
| 1280 | + share = vm.shares[sid] |
| 1281 | + self.assertTrue(isinstance(share, Share) or isinstance(share, Root)) |
| 1282 | + compare_share(share, old_share) |
| 1283 | + |
| 1284 | + for sid in vm.shared: |
| 1285 | + old_share = legacy_shared[sid] |
| 1286 | + share = vm.shared[sid] |
| 1287 | + self.assertTrue(isinstance(share, Shared)) |
| 1288 | + compare_share(share, old_share) |
| 1289 | + |
| 1290 | + def test_upgrade_5_with_udfs(self): |
| 1291 | + """Test the migration from version 5 with old UDFs.""" |
| 1292 | + # build a fake version 5 state |
| 1293 | + self._build_layout_version_4() |
| 1294 | + self.set_md_version('5') |
| 1295 | + self.udfs_md_dir = os.path.join(self.vm_data_dir, 'udfs') |
| 1296 | + # create some old shares and shared metadata |
| 1297 | + legacy_shares = LegacyShareFileShelf(self.share_md_dir) |
| 1298 | + root_share = _Share(path=self.root_dir, share_id='', |
| 1299 | + access_level='Modify') |
| 1300 | + legacy_shares[''] = root_share |
| 1301 | + for idx, name in enumerate(['share'] * 1000): |
| 1302 | + sid = str(uuid.uuid4()) |
| 1303 | + share_name = name + '_' + str(idx) |
| 1304 | + share = _Share(path=os.path.join(self.shares_dir, share_name), |
| 1305 | + share_id=sid, name=share_name, |
| 1306 | + node_id=str(uuid.uuid4()), |
| 1307 | + other_username='username'+str(idx), |
| 1308 | + other_visible_name='visible name ' + str(idx)) |
| 1309 | + if idx % 2: |
| 1310 | + share.access_level = 'Modify' |
| 1311 | + else: |
| 1312 | + share.access_level = 'View' |
| 1313 | + legacy_shares[sid] = share |
| 1314 | + |
| 1315 | + # create shared shares |
| 1316 | + legacy_shared = LegacyShareFileShelf(self.shared_md_dir) |
| 1317 | + for idx, name in enumerate(['dir'] * 5): |
| 1318 | + sid = str(uuid.uuid4()) |
| 1319 | + share_name = name + '_' + str(idx) |
| 1320 | + share = _Share(path=os.path.join(self.root_dir, share_name), |
| 1321 | + share_id=sid, node_id=str(uuid.uuid4()), |
| 1322 | + name=share_name, other_username='hola', |
| 1323 | + other_visible_name='hola') |
| 1324 | + if idx % 2: |
| 1325 | + share.access_level = 'Modify' |
| 1326 | + else: |
| 1327 | + share.access_level = 'View' |
| 1328 | + legacy_shared[sid] = share |
| 1329 | + |
| 1330 | + # create some udfs |
| 1331 | + legacy_udfs = LegacyShareFileShelf(self.udfs_md_dir) |
| 1332 | + for idx, name in enumerate(['dir'] * 5): |
| 1333 | + udf_id = str(uuid.uuid4()) |
| 1334 | + udf_name = name + '_' + str(idx) |
| 1335 | + udf = _UDF(udf_id, str(uuid.uuid4()), '~/' + udf_name, |
| 1336 | + os.path.join(self.home_dir, udf_name)) |
| 1337 | + if idx % 2: |
| 1338 | + udf.subscribed = True |
| 1339 | + else: |
| 1340 | + udf.subscribed = False |
| 1341 | + legacy_udfs[sid] = udf |
| 1342 | + |
| 1343 | + # keep a copy of the current shares and shared metadata to check |
| 1344 | + # the upgrade went ok |
| 1345 | + legacy_shares = dict(legacy_shares.items()) |
| 1346 | + legacy_shared = dict(legacy_shared.items()) |
| 1347 | + legacy_udfs = dict(legacy_udfs.items()) |
| 1348 | + |
| 1349 | + if self.md_version_None: |
| 1350 | + self.set_md_version('') |
| 1351 | + # upgrade it! |
| 1352 | + self.main = FakeMain(self.root_dir, self.shares_dir, |
| 1353 | + self.data_dir, self.partials_dir) |
| 1354 | + vm = self.main.vm |
| 1355 | + def compare_share(share, old_share): |
| 1356 | + """Compare two shares, new and old""" |
| 1357 | + self.assertEquals(share.volume_id, old_share.id) |
| 1358 | + self.assertEquals(share.path, old_share.path) |
| 1359 | + self.assertEquals(share.node_id, old_share.subtree) |
| 1360 | + if not isinstance(share, Root): |
| 1361 | + self.assertEquals(share.name, old_share.name) |
| 1362 | + self.assertEquals(share.other_username, old_share.other_username) |
| 1363 | + self.assertEquals(share.other_visible_name, old_share.other_visible_name) |
| 1364 | + self.assertEquals(share.access_level, old_share.access_level) |
| 1365 | + |
| 1366 | + for sid in vm.shares: |
| 1367 | + old_share = legacy_shares[sid] |
| 1368 | + share = vm.shares[sid] |
| 1369 | + self.assertTrue(isinstance(share, Share) or isinstance(share, Root)) |
| 1370 | + compare_share(share, old_share) |
| 1371 | + |
| 1372 | + for sid in vm.shared: |
| 1373 | + old_share = legacy_shared[sid] |
| 1374 | + share = vm.shared[sid] |
| 1375 | + self.assertTrue(isinstance(share, Shared)) |
| 1376 | + compare_share(share, old_share) |
| 1377 | + |
| 1378 | + for udf_id in vm.udfs: |
| 1379 | + old_udf = legacy_udfs[udf_id] |
| 1380 | + udf = vm.udfs[udf_id] |
| 1381 | + self.assertTrue(isinstance(udf, UDF)) |
| 1382 | + self.assertEquals(udf.volume_id, old_udf.id) |
| 1383 | + self.assertEquals(udf.path, old_udf.path) |
| 1384 | + self.assertEquals(udf.node_id, old_udf.node_id) |
| 1385 | + self.assertEquals(udf.suggested_path, old_udf.suggested_path) |
| 1386 | + self.assertEquals(udf.subscribed, old_udf.subscribed) |
| 1387 | + |
| 1388 | + |
| 1389 | +class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests): |
| 1390 | + """MetadataOldLayoutTests with broken .version file.""" |
| 1391 | + md_version_None = True |
| 1392 | + |
| 1393 | + |
| 1394 | +class BrokenNewMDVersionUpgradeTests(MetadataNewLayoutTests): |
| 1395 | + """MetadataNewLayoutTests with broken .version file.""" |
| 1396 | + md_version_None = True |
| 1397 | + |
| 1398 | + |
| 1399 | +class MetadataUpgraderTests(MetadataTestCase): |
| 1400 | + """MetadataUpgrader tests.""" |
| 1401 | + |
| 1402 | + def setUp(self): |
| 1403 | + """Create the MetadataUpgrader instance.""" |
| 1404 | + MetadataTestCase.setUp(self) |
| 1405 | + self.share_md_dir = os.path.join(self.vm_data_dir, 'shares') |
| 1406 | + self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared') |
| 1407 | + self.udfs_md_dir = os.path.join(self.vm_data_dir, 'udfs') |
| 1408 | + self.home_dir = os.path.join(self.tmpdir, 'home', 'ubuntuonehacker') |
| 1409 | + self.u1_dir = os.path.join(self.home_dir, os.path.split(self.u1_dir)[1]) |
| 1410 | + self.root_dir = self.u1_dir |
| 1411 | + self.shares_dir = os.path.join(self.tmpdir, 'shares') |
| 1412 | + self.shares_dir_link = os.path.join(self.u1_dir, 'Shared With Me') |
| 1413 | + for path in [self.share_md_dir, self.shared_md_dir, |
| 1414 | + self.root_dir, self.shares_dir]: |
| 1415 | + if not os.path.exists(path): |
| 1416 | + os.makedirs(path) |
| 1417 | + os.symlink(self.shares_dir, self.shares_dir_link) |
| 1418 | + self.old_get_md_version = MetadataUpgrader._get_md_version |
| 1419 | + MetadataUpgrader._get_md_version = lambda _: None |
| 1420 | + self.md_upgrader = MetadataUpgrader(self.vm_data_dir, self.share_md_dir, |
| 1421 | + self.shared_md_dir, |
| 1422 | + self.udfs_md_dir, self.root_dir, |
| 1423 | + self.shares_dir, |
| 1424 | + self.shares_dir_link) |
| 1425 | + def tearDown(self): |
| 1426 | + """Restore _get_md_version""" 
| 1427 | + MetadataUpgrader._get_md_version = self.old_get_md_version |
| 1428 | + MetadataTestCase.tearDown(self) |
| 1429 | + |
| 1430 | + def test_guess_metadata_version_None(self): |
| 1431 | + """Test _guess_metadata_version method for pre-version.""" |
| 1432 | + # fake a version None layout |
| 1433 | + if os.path.exists(self.version_file): |
| 1434 | + os.unlink(self.version_file) |
| 1435 | + for path in [self.share_md_dir, self.shared_md_dir, |
| 1436 | + self.root_dir, self.shares_dir]: |
| 1437 | + if os.path.exists(path): |
| 1438 | + self.rmtree(path) |
| 1439 | + os.makedirs(os.path.join(self.root_dir, 'My Files')) |
| 1440 | + shares_dir = os.path.join(self.root_dir, 'Shared With Me') |
| 1441 | + os.makedirs(shares_dir) |
| 1442 | + os.chmod(self.root_dir, 0500) |
| 1443 | + os.chmod(shares_dir, 0500) |
| 1444 | + version = self.md_upgrader._guess_metadata_version() |
| 1445 | + self.assertEquals(None, version) |
| 1446 | + |
| 1447 | + def test_guess_metadata_version_1_or_2(self): |
| 1448 | + """Test _guess_metadata_version method for version 1 or 2.""" |
| 1449 | + # fake a version 1 layout |
| 1450 | + if os.path.exists(self.version_file): |
| 1451 | + os.unlink(self.version_file) |
| 1452 | + self.rmtree(self.root_dir) |
| 1453 | + os.makedirs(os.path.join(self.root_dir, 'My Files')) |
| 1454 | + shares_dir = os.path.join(self.root_dir, 'Shared With Me') |
| 1455 | + os.makedirs(shares_dir) |
| 1456 | + os.chmod(self.root_dir, 0500) |
| 1457 | + os.chmod(shares_dir, 0500) |
| 1458 | + self.rmtree(self.shares_dir) |
| 1459 | + version = self.md_upgrader._guess_metadata_version() |
| 1460 | + self.assertIn(version, ['1', '2']) |
| 1461 | + |
| 1462 | + def test_guess_metadata_version_4(self): |
| 1463 | + """Test _guess_metadata_version method for version 4.""" |
| 1464 | + # fake a version 4 layout |
| 1465 | + if os.path.exists(self.version_file): |
| 1466 | + os.unlink(self.version_file) |
| 1467 | + os.unlink(self.shares_dir_link) |
| 1468 | + os.symlink(self.shares_dir_link, self.shares_dir_link) |
| 1469 | + version = self.md_upgrader._guess_metadata_version() |
| 1470 | + self.assertEquals(version, '4') |
| 1471 | + |
| 1472 | + def test_guess_metadata_version_5(self): |
| 1473 | + """Test _guess_metadata_version method for version 5.""" |
| 1474 | + # fake a version 5 layout and metadata |
| 1475 | + shelf = LegacyShareFileShelf(self.share_md_dir) |
| 1476 | + shelf['foobar'] = _Share(path='/foo/bar', share_id='foobar') |
| 1477 | + version = self.md_upgrader._guess_metadata_version() |
| 1478 | + self.assertEquals(version, '5') |
| 1479 | + |
| 1480 | + def test_guess_metadata_version_6(self): |
| 1481 | + """Test _guess_metadata_version method for version 6.""" |
| 1482 | + # fake a version 6 layout and metadata |
| 1483 | + shelf = VMFileShelf(self.share_md_dir) |
| 1484 | + shelf['foobar'] = Share(path='/foo/bar', volume_id='foobar') |
| 1485 | + version = self.md_upgrader._guess_metadata_version() |
| 1486 | + self.assertEquals(version, '6') |
| 1487 | + |
| 1488 | |
| 1489 | === modified file 'ubuntuone/syncdaemon/dbus_interface.py' |
| 1490 | --- ubuntuone/syncdaemon/dbus_interface.py 2010-01-26 14:32:54 +0000 |
| 1491 | +++ ubuntuone/syncdaemon/dbus_interface.py 2010-01-26 20:35:29 +0000 |
| 1492 | @@ -544,7 +544,9 @@ |
| 1493 | def handle_SV_SHARE_CHANGED(self, message, share): |
| 1494 | """ handle SV_SHARE_CHANGED event, emit's ShareChanged signal. """ |
| 1495 | self.handle_default('SV_SHARE_CHANGED', message, share) |
| 1496 | - self.dbus_iface.shares.emit_share_changed(message, share) |
| 1497 | + if message != 'deleted': |
| 1498 | + # deleted shares are handled in VM |
| 1499 | + self.dbus_iface.shares.emit_share_changed(message, share) |
| 1500 | |
| 1501 | def handle_SV_FREE_SPACE(self, share_id, free_bytes): |
| 1502 | """ handle SV_FREE_SPACE event, emit ShareChanged signal. """ |
| 1503 | @@ -871,6 +873,8 @@ |
| 1504 | share_dict[unicode(k)] = '' |
| 1505 | elif k == 'path': |
| 1506 | share_dict[unicode(k)] = v.decode('utf-8') |
| 1507 | + elif k == 'accepted': |
| 1508 | + share_dict[unicode(k)] = self.bool_str(v) |
| 1509 | else: |
| 1510 | share_dict[unicode(k)] = unicode(v) |
| 1511 | return share_dict |
| 1512 | @@ -949,7 +953,7 @@ |
| 1513 | |
| 1514 | def emit_share_answer_response(self, share_id, answer, error=None): |
| 1515 | """ emits ShareCreated signal """ |
| 1516 | - answer_info = dict(share_id=share_id, answer=answer) |
| 1517 | + answer_info = dict(volume_id=share_id, answer=answer) |
| 1518 | if error: |
| 1519 | answer_info['error'] = error |
| 1520 | self.ShareAnswerResponse(answer_info) |
| 1521 | |
| 1522 | === modified file 'ubuntuone/syncdaemon/event_queue.py' |
| 1523 | --- ubuntuone/syncdaemon/event_queue.py 2010-01-26 15:54:29 +0000 |
| 1524 | +++ ubuntuone/syncdaemon/event_queue.py 2010-01-26 20:35:29 +0000 |
| 1525 | @@ -159,7 +159,7 @@ |
| 1526 | } |
| 1527 | |
| 1528 | # these are the events that will listen from inotify |
| 1529 | -INOTIFY_EVENTS = ( |
| 1530 | +INOTIFY_EVENTS_GENERAL = ( |
| 1531 | evtcodes.IN_OPEN | |
| 1532 | evtcodes.IN_CLOSE_NOWRITE | |
| 1533 | evtcodes.IN_CLOSE_WRITE | |
| 1534 | @@ -169,6 +169,12 @@ |
| 1535 | evtcodes.IN_MOVED_TO | |
| 1536 | evtcodes.IN_MOVE_SELF |
| 1537 | ) |
| 1538 | +INOTIFY_EVENTS_ANCESTORS = ( |
| 1539 | + evtcodes.IN_DELETE | |
| 1540 | + evtcodes.IN_MOVED_FROM | |
| 1541 | + evtcodes.IN_MOVED_TO | |
| 1542 | + evtcodes.IN_MOVE_SELF |
| 1543 | +) |
| 1544 | |
| 1545 | DEFAULT_HANDLER = "handle_default" # receives (event_name, *args, **kwargs) |
| 1546 | |
| 1547 | @@ -201,14 +207,58 @@ |
| 1548 | return True |
| 1549 | |
| 1550 | |
| 1551 | -class _INotifyProcessor(pyinotify.ProcessEvent): |
| 1552 | - """Helper class that is called from inpotify when an event happens. |
| 1553 | +class _AncestorsINotifyProcessor(pyinotify.ProcessEvent): |
| 1554 | + """inotify's processor when an event happens on an UDF's ancestor.""" 
| 1555 | + def __init__(self, eq): |
| 1556 | + self.log = logging.getLogger('ubuntuone.SyncDaemon.AncestorsINotProc') |
| 1557 | + self.eq = eq |
| 1558 | + |
| 1559 | + def _get_udf(self, path): |
| 1560 | + """Get the udf for a specific path. |
| 1561 | + |
| 1562 | + It can return None in case the UDF was deleted in the meantime. |
| 1563 | + """ |
| 1564 | + for udf in self.eq.fs.vm.udfs.itervalues(): |
| 1565 | + parent = os.path.dirname(udf.path) + os.path.sep |
| 1566 | + if parent.startswith(path + os.path.sep): |
| 1567 | + return udf |
| 1568 | + return None |
| 1569 | + |
| 1570 | + def process_IN_MOVE_SELF(self, event): |
| 1571 | + """Don't do anything here. |
| 1572 | + |
| 1573 | + We just turned this event on because pyinotify does some |
| 1574 | + path-fixing in its internal processing when this happens. |
| 1575 | + """ |
| 1576 | + process_IN_MOVED_TO = process_IN_MOVE_SELF |
| 1577 | + |
| 1578 | + def process_IN_MOVED_FROM(self, event): |
| 1579 | + """Getting it out or renaming means unsubscribe.""" 
| 1580 | + if event.mask & evtcodes.IN_ISDIR: |
| 1581 | + udf = self._get_udf(event.path) |
| 1582 | + if udf is not None: |
| 1583 | + self.log.info("Got MOVED_FROM on path %r, unsubscribing " |
| 1584 | + "udf %s", event.path, udf) |
| 1585 | + self.eq.fs.vm.unsubscribe_udf(udf.volume_id) |
| 1586 | + |
| 1587 | + def process_IN_DELETE(self, event): |
| 1588 | + """Check to see if the UDF was deleted.""" |
| 1589 | + if event.mask & evtcodes.IN_ISDIR: |
| 1590 | + udf = self._get_udf(event.path) |
| 1591 | + if udf is not None and udf.path == event.pathname: |
| 1592 | + self.log.info("Got DELETE on path %r, deleting udf %s", |
| 1593 | + event.path, udf) |
| 1594 | + self.eq.fs.vm.delete_volume(udf.volume_id) |
| 1595 | + |
| 1596 | + |
| 1597 | +class _GeneralINotifyProcessor(pyinotify.ProcessEvent): |
| 1598 | + """inotify's processor when a general event happens. |
| 1599 | |
| 1600 | This class also catchs the MOVEs events, and synthetises a new |
| 1601 | FS_(DIR|FILE)_MOVE event when possible. |
| 1602 | """ |
| 1603 | def __init__(self, eq): |
| 1604 | - self.log = logging.getLogger('ubuntuone.SyncDaemon.INotifyProcessor') |
| 1605 | + self.log = logging.getLogger('ubuntuone.SyncDaemon.GeneralINotProc') |
| 1606 | self.eq = eq |
| 1607 | self.held_event = None |
| 1608 | self.timer = None |
| 1609 | @@ -216,16 +266,6 @@ |
| 1610 | self.frozen_evts = False |
| 1611 | self._to_mute = MuteFilter() |
| 1612 | |
| 1613 | - def _is_udf_ancestor(self, path): |
| 1614 | - """Decide if path is an UDF ancestor or not.""" |
| 1615 | - result = None |
| 1616 | - for udf in self.eq.fs.vm.udfs.itervalues(): |
| 1617 | - parent = os.path.dirname(udf.path) + os.path.sep |
| 1618 | - if parent.startswith(path + os.path.sep): |
| 1619 | - return udf |
| 1620 | - |
| 1621 | - return result |
| 1622 | - |
| 1623 | def add_to_mute_filter(self, event, *paths): |
| 1624 | """Add an event and path(s) to the mute filter.""" |
| 1625 | # all events have one path except the MOVEs |
| 1626 | @@ -261,14 +301,12 @@ |
| 1627 | |
| 1628 | def process_IN_OPEN(self, event): |
| 1629 | """Filter IN_OPEN to make it happen only in files.""" |
| 1630 | - if not (event.mask & evtcodes.IN_ISDIR) and \ |
| 1631 | - not self._is_udf_ancestor(event.path): |
| 1632 | + if not (event.mask & evtcodes.IN_ISDIR): |
| 1633 | self.push_event(event) |
| 1634 | |
| 1635 | def process_IN_CLOSE_NOWRITE(self, event): |
| 1636 | """Filter IN_CLOSE_NOWRITE to make it happen only in files.""" |
| 1637 | - if not (event.mask & evtcodes.IN_ISDIR) and \ |
| 1638 | - not self._is_udf_ancestor(event.path): |
| 1639 | + if not (event.mask & evtcodes.IN_ISDIR): |
| 1640 | self.push_event(event) |
| 1641 | |
| 1642 | def process_IN_MOVE_SELF(self, event): |
| 1643 | @@ -281,11 +319,6 @@ |
| 1644 | |
| 1645 | def process_IN_MOVED_FROM(self, event): |
| 1646 | """Capture the MOVED_FROM to maybe syntethize FILE_MOVED.""" |
| 1647 | - udf = self._is_udf_ancestor(event.path) |
| 1648 | - if udf is not None: |
| 1649 | - self.eq.fs.vm.unsubscribe_udf(udf.volume_id) |
| 1650 | - return |
| 1651 | - |
| 1652 | if self.held_event is not None: |
| 1653 | self.release_held_event() |
| 1654 | |
| 1655 | @@ -317,9 +350,6 @@ |
| 1656 | |
| 1657 | def process_IN_MOVED_TO(self, event): |
| 1658 | """Capture the MOVED_TO to maybe syntethize FILE_MOVED.""" |
| 1659 | - if self._is_udf_ancestor(event.path): |
| 1660 | - return |
| 1661 | - |
| 1662 | if self.held_event is not None: |
| 1663 | if event.cookie == self.held_event.cookie: |
| 1664 | try: |
| 1665 | @@ -381,16 +411,6 @@ |
| 1666 | |
| 1667 | def process_default(self, event): |
| 1668 | """Push the event into the EventQueue.""" |
| 1669 | - udf = self._is_udf_ancestor(event.path) |
| 1670 | - if udf is not None: |
| 1671 | - # if event is the deletion of the UDF per se, |
| 1672 | - # call delete_volume on VolumeManager for that UDF. |
| 1673 | - ename = NAME_TRANSLATIONS.get(event.mask, None) |
| 1674 | - is_dir_delete = ename is not None and ename == 'FS_DIR_DELETE' |
| 1675 | - if udf.path == event.pathname and is_dir_delete: |
| 1676 | - self.eq.fs.vm.delete_volume(udf.volume_id) |
| 1677 | - return |
| 1678 | - |
| 1679 | if self.held_event is not None: |
| 1680 | self.release_held_event() |
| 1681 | self.push_event(event) |
| 1682 | @@ -482,13 +502,23 @@ |
| 1683 | |
| 1684 | self.log = logging.getLogger('ubuntuone.SyncDaemon.EQ') |
| 1685 | self.fs = fs |
| 1686 | - # hook inotify |
| 1687 | - self._inotify_reader = None |
| 1688 | - self._inotify_wm = wm = pyinotify.WatchManager() |
| 1689 | - self._processor = _INotifyProcessor(self) |
| 1690 | - self._inotify_notifier = pyinotify.Notifier(wm, self._processor) |
| 1691 | - self._hook_inotify_to_twisted(wm, self._inotify_notifier) |
| 1692 | - self._watchs = {} |
| 1693 | + |
| 1694 | + # general inotify |
| 1695 | + self._inotify_general_wm = wm = pyinotify.WatchManager() |
| 1696 | + self._processor = _GeneralINotifyProcessor(self) |
| 1697 | + self._inotify_notifier_gral = pyinotify.Notifier(wm, self._processor) |
| 1698 | + self._inotify_reader_gral = self._hook_inotify_to_twisted( |
| 1699 | + wm, self._inotify_notifier_gral) |
| 1700 | + self._general_watchs = {} |
| 1701 | + |
| 1702 | + # ancestors inotify |
| 1703 | + self._inotify_ancestors_wm = wm = pyinotify.WatchManager() |
| 1704 | + antr_processor = _AncestorsINotifyProcessor(self) |
| 1705 | + self._inotify_notifier_antr = pyinotify.Notifier(wm, antr_processor) |
| 1706 | + self._inotify_reader_antr = self._hook_inotify_to_twisted( |
| 1707 | + wm, self._inotify_notifier_antr) |
| 1708 | + self._ancestors_watchs = {} |
| 1709 | + |
| 1710 | self.dispatching = False |
| 1711 | self.dispatch_queue = Queue() |
| 1712 | self.empty_event_queue_callbacks = set() |
| 1713 | @@ -526,34 +556,64 @@ |
| 1714 | notifier.process_events() |
| 1715 | |
| 1716 | reader = MyReader() |
| 1717 | - self._inotify_reader = reader |
| 1718 | reactor.addReader(reader) |
| 1719 | + return reader |
| 1720 | |
| 1721 | def shutdown(self): |
| 1722 | """Prepares the EQ to be closed.""" |
| 1723 | - self._inotify_notifier.stop() |
| 1724 | - reactor.removeReader(self._inotify_reader) |
| 1725 | + self._inotify_notifier_gral.stop() |
| 1726 | + self._inotify_notifier_antr.stop() |
| 1727 | + reactor.removeReader(self._inotify_reader_gral) |
| 1728 | + reactor.removeReader(self._inotify_reader_antr) |
| 1729 | |
| 1730 | def inotify_rm_watch(self, dirpath): |
| 1731 | """Remove watch from a dir.""" |
| 1732 | - try: |
| 1733 | - wd = self._watchs[dirpath] |
| 1734 | - except KeyError: |
| 1735 | + if dirpath in self._general_watchs: |
| 1736 | + w_dict = self._general_watchs |
| 1737 | + w_manager = self._inotify_general_wm |
| 1738 | + elif dirpath in self._ancestors_watchs: |
| 1739 | + w_dict = self._ancestors_watchs |
| 1740 | + w_manager = self._inotify_ancestors_wm |
| 1741 | + else: |
| 1742 | raise ValueError("The path %r is not watched right now!" % dirpath) |
| 1743 | - result = self._inotify_wm.rm_watch(wd) |
| 1744 | + |
| 1745 | + wd = w_dict[dirpath] |
| 1746 | + result = w_manager.rm_watch(wd) |
| 1747 | if not result[wd]: |
| 1748 | raise RuntimeError("The path %r couldn't be removed!" % dirpath) |
| 1749 | - del self._watchs[dirpath] |
| 1750 | + del w_dict[dirpath] |
| 1751 | |
| 1752 | def inotify_add_watch(self, dirpath): |
| 1753 | """Add watch to a dir.""" |
| 1754 | - self.log.debug("Adding inotify watch to %r", dirpath) |
| 1755 | - result = self._inotify_wm.add_watch(dirpath, INOTIFY_EVENTS) |
| 1756 | - self._watchs[dirpath] = result[dirpath] |
| 1757 | + # see where to add it |
| 1758 | + if self._is_udf_ancestor(dirpath): |
| 1759 | + w_type = "ancestors" |
| 1760 | + w_manager = self._inotify_ancestors_wm |
| 1761 | + w_dict = self._ancestors_watchs |
| 1762 | + events = INOTIFY_EVENTS_ANCESTORS |
| 1763 | + else: |
| 1764 | + w_type = "general" |
| 1765 | + w_manager = self._inotify_general_wm |
| 1766 | + w_dict = self._general_watchs |
| 1767 | + events = INOTIFY_EVENTS_GENERAL |
| 1768 | + |
| 1769 | + # add the watch! |
| 1770 | + self.log.debug("Adding %s inotify watch to %r", w_type, dirpath) |
| 1771 | + result = w_manager.add_watch(dirpath, events) |
| 1772 | + w_dict[dirpath] = result[dirpath] |
| 1773 | |
| 1774 | def inotify_has_watch(self, dirpath): |
| 1775 | """Check if a dirpath is watched.""" |
| 1776 | - return dirpath in self._watchs |
| 1777 | + return (dirpath in self._general_watchs or |
| 1778 | + dirpath in self._ancestors_watchs) |
| 1779 | + |
| 1780 | + def _is_udf_ancestor(self, path): |
| 1781 | + """Decide if path is an UDF ancestor or not.""" |
| 1782 | + for udf in self.fs.vm.udfs.itervalues(): |
| 1783 | + parent = os.path.dirname(udf.path) + os.path.sep |
| 1784 | + if parent.startswith(path + os.path.sep): |
| 1785 | + return True |
| 1786 | + return False |
| 1787 | |
| 1788 | def unsubscribe(self, obj): |
| 1789 | """Removes the callback object from the listener queue. |
| 1790 | |
| 1791 | === modified file 'ubuntuone/syncdaemon/tools.py' |
| 1792 | --- ubuntuone/syncdaemon/tools.py 2010-01-25 14:58:45 +0000 |
| 1793 | +++ ubuntuone/syncdaemon/tools.py 2010-01-26 20:35:29 +0000 |
| 1794 | @@ -298,7 +298,7 @@ |
| 1795 | self.log.debug('accept_share(%s)', share_id) |
| 1796 | shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME) |
| 1797 | d = self.wait_for_signal('ShareAnswerResponse', |
| 1798 | - lambda info: info['share_id']==share_id) |
| 1799 | + lambda info: info['volume_id']==share_id) |
| 1800 | shares_client.call_method('accept_share', share_id, |
| 1801 | reply_handler=lambda _: None, |
| 1802 | error_handler=d.errback) |
| 1803 | @@ -309,7 +309,7 @@ |
| 1804 | self.log.debug('reject_share(%s)', share_id) |
| 1805 | shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME) |
| 1806 | d = self.wait_for_signal('ShareAnswerResponse', |
| 1807 | - lambda info: info['share_id']==share_id) |
| 1808 | + lambda info: info['volume_id']==share_id) |
| 1809 | shares_client.call_method('reject_share', share_id, |
| 1810 | reply_handler=lambda _: None, |
| 1811 | error_handler=d.errback) |
| 1812 | @@ -601,8 +601,8 @@ |
| 1813 | for share in shares: |
| 1814 | msg_template = ' id=%s name=%s accepted=%s ' + \ |
| 1815 | 'access_level=%s to=%s path=%s\n' |
| 1816 | - out.write(msg_template % (share['id'], share['name'], |
| 1817 | - share['accepted'], share['access_level'], |
| 1818 | + out.write(msg_template % (share['volume_id'], share['name'], |
| 1819 | + bool(share['accepted']), share['access_level'], |
| 1820 | share['other_username'], |
| 1821 | share['path'])) |
| 1822 | |
| 1823 | @@ -615,7 +615,7 @@ |
| 1824 | out.write("Folder list:\n") |
| 1825 | for folder in folders: |
| 1826 | msg_template = ' id=%s subscribed=%s path=%s\n' |
| 1827 | - out.write(msg_template % (folder['id'], folder['subscribed'], |
| 1828 | + out.write(msg_template % (folder['volume_id'], folder['subscribed'], |
| 1829 | folder['path'])) |
| 1830 | |
| 1831 | |
| 1832 | @@ -636,7 +636,7 @@ |
| 1833 | out.write("Shares list:\n") |
| 1834 | for share in shares: |
| 1835 | out.write(' id=%s name=%s accepted=%s access_level=%s from=%s\n' % \ |
| 1836 | - (share['id'], share['name'], share['accepted'], |
| 1837 | + (share['volume_id'], share['name'], bool(share['accepted']), |
| 1838 | share['access_level'], share['other_username'])) |
| 1839 | |
| 1840 | |
| 1841 | |
| 1842 | === modified file 'ubuntuone/syncdaemon/volume_manager.py' |
| 1843 | --- ubuntuone/syncdaemon/volume_manager.py 2010-01-26 19:40:05 +0000 |
| 1844 | +++ ubuntuone/syncdaemon/volume_manager.py 2010-01-26 20:35:29 +0000 |
| 1845 | @@ -41,10 +41,10 @@ |
| 1846 | from twisted.internet import defer |
| 1847 | |
| 1848 | |
| 1849 | -class Share(object): |
| 1850 | +class _Share(object): |
| 1851 | """Represents a share or mount point""" |
| 1852 | |
| 1853 | - def __init__(self, volume_id=request.ROOT, node_id=None, path=None, |
| 1854 | + def __init__(self, share_id=request.ROOT, node_id=None, path=None, |
| 1855 | name=None, access_level='View', accepted=False, |
| 1856 | other_username=None, other_visible_name=None): |
| 1857 | """ Creates the instance. |
| 1858 | @@ -55,7 +55,7 @@ |
| 1859 | self.path = None |
| 1860 | else: |
| 1861 | self.path = os.path.normpath(path) |
| 1862 | - self.id = str(volume_id) |
| 1863 | + self.id = str(share_id) |
| 1864 | self.access_level = access_level |
| 1865 | self.accepted = accepted |
| 1866 | self.name = name |
| 1867 | @@ -64,16 +64,77 @@ |
| 1868 | self.subtree = node_id |
| 1869 | self.free_bytes = None |
| 1870 | |
| 1871 | + |
| 1872 | +class _UDF(object): |
| 1873 | + """A representation of a User Defined Folder.""" |
| 1874 | + |
| 1875 | + def __init__(self, udf_id, node_id, suggested_path, |
| 1876 | + path, subscribed=True): |
| 1877 | + """Create the UDF, subscribed by default""" |
| 1878 | + # id and node_id should be str or None |
| 1879 | + assert isinstance(udf_id, basestring) or udf_id is None |
| 1880 | + assert isinstance(node_id, basestring) or node_id is None |
| 1881 | + self.id = udf_id |
| 1882 | + self.node_id = node_id |
| 1883 | + self.suggested_path = suggested_path |
| 1884 | + self.path = path |
| 1885 | + self.subscribed = subscribed |
| 1886 | + |
| 1887 | + |
| 1888 | +class Volume(object): |
| 1889 | + """A generic volume.""" |
| 1890 | + |
| 1891 | + def __init__(self, volume_id, node_id): |
| 1892 | + """Create the volume.""" |
| 1893 | + # id and node_id should be str or None |
| 1894 | + assert isinstance(volume_id, basestring) or volume_id is None |
| 1895 | + assert isinstance(node_id, basestring) or node_id is None |
| 1896 | + self.volume_id = volume_id |
| 1897 | + self.node_id = node_id |
| 1898 | + |
| 1899 | + @property |
| 1900 | + def id(self): |
| 1901 | + return self.volume_id |
| 1902 | + |
| 1903 | + def can_write(self): |
| 1904 | + raise NotImplementedError('Subclass responsibility') 
| 1905 | + |
| 1906 | + def __eq__(self, other): |
| 1907 | + result = (self.id == other.id and |
| 1908 | + self.node_id == other.node_id) |
| 1909 | + return result |
| 1910 | + |
| 1911 | + |
| 1912 | +class Share(Volume): |
| 1913 | + """A volume representing a Share.""" |
| 1914 | + |
| 1915 | + def __init__(self, volume_id=None, node_id=None, path=None, name=None, |
| 1916 | + other_username=None, other_visible_name=None, accepted=False, |
| 1917 | + access_level='View', free_bytes=None): |
| 1918 | + """Create the share.""" |
| 1919 | + super(Share, self).__init__(volume_id, node_id) |
| 1920 | + self.__dict__['type'] = 'Share' |
| 1921 | + if path is None: |
| 1922 | + self.path = None |
| 1923 | + else: |
| 1924 | + self.path = os.path.normpath(path) |
| 1925 | + self.name = name |
| 1926 | + self.other_username = other_username |
| 1927 | + self.other_visible_name = other_visible_name |
| 1928 | + self.accepted = accepted |
| 1929 | + self.access_level = access_level |
| 1930 | + self.free_bytes = free_bytes |
| 1931 | + |
| 1932 | @classmethod |
| 1933 | def from_response(cls, share_response, path): |
| 1934 | """ Creates a Share instance from a ShareResponse. |
| 1935 | |
| 1936 | The received path should be 'bytes' |
| 1937 | """ |
| 1938 | - share = cls(str(share_response.id), share_response.subtree, path, |
| 1939 | - share_response.name, share_response.access_level, |
| 1940 | - share_response.accepted, share_response.other_username, |
| 1941 | - share_response.other_visible_name) |
| 1942 | + share = cls(str(share_response.id), str(share_response.subtree), |
| 1943 | + path, share_response.name, share_response.other_username, |
| 1944 | + share_response.other_visible_name, |
| 1945 | + share_response.accepted, share_response.access_level) |
| 1946 | return share |
| 1947 | |
| 1948 | @classmethod |
| 1949 | @@ -82,12 +143,12 @@ |
| 1950 | |
| 1951 | The received path should be 'bytes' |
| 1952 | """ |
| 1953 | - share = cls(path=path, volume_id=str(share_notify.share_id), |
| 1954 | - name=share_notify.share_name, |
| 1955 | - access_level=share_notify.access_level, |
| 1956 | + share = cls(volume_id=str(share_notify.share_id), |
| 1957 | + node_id=str(share_notify.subtree), |
| 1958 | + path=path, name=share_notify.share_name, |
| 1959 | other_username=share_notify.from_username, |
| 1960 | other_visible_name=share_notify.from_visible_name, |
| 1961 | - node_id=share_notify.subtree) |
| 1962 | + access_level=share_notify.access_level) |
| 1963 | return share |
| 1964 | |
| 1965 | @classmethod |
| 1966 | @@ -97,13 +158,11 @@ |
| 1967 | The received path should be 'bytes' |
| 1968 | |
| 1969 | """ |
| 1970 | - share = cls(volume_id=str(share_volume.volume_id), path=path, |
| 1971 | - name=share_volume.share_name, |
| 1972 | - access_level=share_volume.access_level, |
| 1973 | - other_username=share_volume.other_username, |
| 1974 | - other_visible_name=share_volume.other_visible_name, |
| 1975 | - node_id=str(share_volume.node_id), |
| 1976 | - accepted=share_volume.accepted) |
| 1977 | + share = cls(str(share_volume.volume_id), str(share_volume.node_id), |
| 1978 | + path, share_volume.share_name, |
| 1979 | + share_volume.other_username, |
| 1980 | + share_volume.other_visible_name, share_volume.accepted, |
| 1981 | + share_volume.access_level) |
| 1982 | return share |
| 1983 | |
| 1984 | def can_write(self): |
| 1985 | @@ -114,35 +173,64 @@ |
| 1986 | |
| 1987 | @property |
| 1988 | def active(self): |
| 1989 | - """Returns True if the Share is accepted.""" |
| 1990 | + """Return True if this Share is accepted.""" |
| 1991 | return self.accepted |
| 1992 | |
| 1993 | - # node_id property |
| 1994 | - def _set_node_id(self, node_id): |
| 1995 | - self.subtree = node_id |
| 1996 | - node_id = property(lambda self: self.subtree, _set_node_id) |
| 1997 | - |
| 1998 | - # volume_id property |
| 1999 | - def _set_volume_id(self, volume_id): |
| 2000 | - self.id = volume_id |
| 2001 | - volume_id = property(lambda self: self.id, _set_volume_id) |
| 2002 | - |
| 2003 | - |
| 2004 | -class UDF(object): |
| 2005 | - """A representation of a User Defined Folder.""" |
| 2006 | - |
| 2007 | - def __init__(self, volume_id, node_id, suggested_path, |
| 2008 | - path, subscribed=True): |
| 2009 | + def __eq__(self, other): |
| 2010 | + result = (super(Share, self).__eq__(other) and |
| 2011 | + self.path == other.path and |
| 2012 | + self.name == other.name and |
| 2013 | + self.other_username == other.other_username and |
| 2014 | + self.other_visible_name == other.other_visible_name and |
| 2015 | + self.accepted == other.accepted and |
| 2016 | + self.access_level == other.access_level) |
| 2017 | + return result |
| 2018 | + |
| 2019 | + |
| 2020 | +class Shared(Share): |
| 2021 | + |
| 2022 | + def __init__(self, *args, **kwargs): |
| 2023 | + super(Shared, self).__init__(*args, **kwargs) |
| 2024 | + self.__dict__['type'] = 'Shared' |
| 2025 | + |
| 2026 | + |
| 2027 | +class Root(Volume): |
| 2028 | + """A volume representing the root.""" |
| 2029 | + |
| 2030 | + def __init__(self, volume_id=None, node_id=None, path=None): |
| 2031 | + """Create the Root.""" |
| 2032 | + super(Root, self).__init__(volume_id, node_id) |
| 2033 | + self.__dict__['type'] = 'Root' |
| 2034 | + self.path = path |
| 2035 | + |
| 2036 | + def __eq__(self, other): |
| 2037 | + result = (super(Root, self).__eq__(other) and |
| 2038 | + self.path == other.path) |
| 2039 | + return result |
| 2040 | + |
| 2041 | + def can_write(self): |
| 2042 | + return True |
| 2043 | + |
| 2044 | + def is_active(self): |
| 2045 | + return True |
| 2046 | + |
| 2047 | + |
| 2048 | +class UDF(Volume): |
| 2049 | + """A volume representing a User Defined Folder.""" |
| 2050 | + |
| 2051 | + def __init__(self, volume_id=None, node_id=None, |
| 2052 | + suggested_path=None, path=None, subscribed=True): |
| 2053 | """Create the UDF, subscribed by default""" |
| 2054 | - # id and node_id should be str or None |
| 2055 | - assert isinstance(volume_id, basestring) or volume_id is None |
| 2056 | - assert isinstance(node_id, basestring) or node_id is None |
| 2057 | - self.id = volume_id |
| 2058 | + super(UDF, self).__init__(volume_id, node_id) |
| 2059 | + self.__dict__['type'] = 'UDF' |
| 2060 | self.node_id = node_id |
| 2061 | self.suggested_path = suggested_path |
| 2062 | self.path = path |
| 2063 | self.subscribed = subscribed |
| 2064 | |
| 2065 | + def __repr__(self): |
| 2066 | + return "<UDF id %r, real path %r>" % (self.id, self.path) |
| 2067 | + |
| 2068 | @property |
| 2069 | def ancestors(self): |
| 2070 | """Calculate all the ancestors for this UDF's path.""" |
| 2071 | @@ -161,6 +249,11 @@ |
| 2072 | """We always can write in a UDF.""" |
| 2073 | return True |
| 2074 | |
| 2075 | + @property |
| 2076 | + def active(self): |
| 2077 | + """Returns True if the UDF is subscribed.""" |
| 2078 | + return self.subscribed |
| 2079 | + |
| 2080 | @classmethod |
| 2081 | def from_udf_volume(cls, udf_volume, path): |
| 2082 | """Creates a UDF instance from a volumes.UDFVolume. |
| 2083 | @@ -171,21 +264,18 @@ |
| 2084 | return cls(str(udf_volume.volume_id), str(udf_volume.node_id), |
| 2085 | udf_volume.suggested_path, path) |
| 2086 | |
| 2087 | - @property |
| 2088 | - def active(self): |
| 2089 | - """Returns True if the UDF is subscribed.""" |
| 2090 | - return self.subscribed |
| 2091 | - |
| 2092 | - # volume_id property |
| 2093 | - def _set_volume_id(self, volume_id): |
| 2094 | - self.id = volume_id |
| 2095 | - volume_id = property(lambda self: self.id, _set_volume_id) |
| 2096 | + def __eq__(self, other): |
| 2097 | + result = (super(UDF, self).__eq__(other) and |
| 2098 | + self.suggested_path == other.suggested_path and |
| 2099 | + self.path == other.path and |
| 2100 | + self.subscribed == other.subscribed) |
| 2101 | + return result |
| 2102 | |
| 2103 | |
| 2104 | class VolumeManager(object): |
| 2105 | """Manages shares and mount points.""" |
| 2106 | |
| 2107 | - METADATA_VERSION = '5' |
| 2108 | + METADATA_VERSION = '6' |
| 2109 | |
| 2110 | def __init__(self, main): |
| 2111 | """Create the instance and populate the shares/d attributes |
| 2112 | @@ -199,8 +289,9 @@ |
| 2113 | self._udfs_dir = os.path.join(self._data_dir, 'udfs') |
| 2114 | |
| 2115 | md_upgrader = MetadataUpgrader(self._data_dir, self._shares_dir, |
| 2116 | - self._shared_dir, self.m.root_dir, |
| 2117 | - self.m.shares_dir, self.m.shares_dir_link) |
| 2118 | + self._shared_dir, self._udfs_dir, |
| 2119 | + self.m.root_dir, self.m.shares_dir, |
| 2120 | + self.m.shares_dir_link) |
| 2121 | md_upgrader.upgrade_metadata() |
| 2122 | |
| 2123 | # build the dir layout |
| 2124 | @@ -233,7 +324,7 @@ |
| 2125 | self.shared = VMFileShelf(self._shared_dir) |
| 2126 | self.udfs = VMFileShelf(self._udfs_dir) |
| 2127 | if self.shares.get(request.ROOT) is None: |
| 2128 | - self.root = Share(self.m.root_dir) |
| 2129 | + self.root = Root(path=self.m.root_dir) |
| 2130 | else: |
| 2131 | self.root = self.shares[request.ROOT] |
| 2132 | self.root.access_level = 'Modify' |
| 2133 | @@ -356,7 +447,7 @@ |
| 2134 | self.log.warning("we got a share with 'from_me' direction," |
| 2135 | " but don't have the node_id in the metadata yet") |
| 2136 | path = None |
| 2137 | - share = Share.from_response(a_share, path) |
| 2138 | + share = Shared.from_response(a_share, path) |
| 2139 | shared.append(share.volume_id) |
| 2140 | self.add_shared(share) |
| 2141 | self._cleanup_volumes(shares, shared) |
| 2142 | @@ -583,7 +674,7 @@ |
| 2143 | mdobj = self.m.fs.get_by_path(path) |
| 2144 | mdid = mdobj.mdid |
| 2145 | marker = MDMarker(mdid) |
| 2146 | - share = Share(path=self.m.fs.get_abspath("", mdobj.path), |
| 2147 | + share = Shared(path=self.m.fs.get_abspath("", mdobj.path), |
| 2148 | volume_id=marker, |
| 2149 | name=name, access_level=access_level, |
| 2150 | other_username=username, other_visible_name=None, |
| 2151 | @@ -748,17 +839,18 @@ |
| 2152 | udf = self.udfs[udf_id] |
| 2153 | except KeyError: |
| 2154 | push_error("DOES_NOT_EXIST") |
| 2155 | - udf.subscribed = True |
| 2156 | - self.udfs[udf_id] = udf |
| 2157 | - try: |
| 2158 | - d = self._scan_udf(udf) |
| 2159 | - except KeyError, e: |
| 2160 | - push_error("METADATA_DOES_NOT_EXIST") |
| 2161 | else: |
| 2162 | - d.addCallbacks( |
| 2163 | - lambda _: self.m.event_q.push('VM_UDF_SUBSCRIBED', udf), |
| 2164 | - lambda f: push_error(f.getErrorMessage())) |
| 2165 | - return d |
| 2166 | + udf.subscribed = True |
| 2167 | + self.udfs[udf_id] = udf |
| 2168 | + try: |
| 2169 | + d = self._scan_udf(udf) |
| 2170 | + except KeyError, e: |
| 2171 | + push_error("METADATA_DOES_NOT_EXIST") |
| 2172 | + else: |
| 2173 | + d.addCallbacks( |
| 2174 | + lambda _: self.m.event_q.push('VM_UDF_SUBSCRIBED', udf), |
| 2175 | + lambda f: push_error(f.getErrorMessage())) |
| 2176 | + return d |
| 2177 | |
| 2178 | def _scan_udf(self, udf): |
| 2179 | """Local and server rescan of a UDF.""" |
| 2180 | @@ -844,7 +936,7 @@ |
| 2181 | class MetadataUpgrader(object): |
| 2182 | """A class that loads old metadata and migrate it.""" |
| 2183 | |
| 2184 | - def __init__(self, data_dir, shares_md_dir, shared_md_dir, |
| 2185 | + def __init__(self, data_dir, shares_md_dir, shared_md_dir, udfs_md_dir, |
| 2186 | root_dir, shares_dir, shares_dir_link): |
| 2187 | """Creates the instance""" |
| 2188 | self.log = logging.getLogger('ubuntuone.SyncDaemon.VM.MD') |
| 2189 | @@ -852,6 +944,7 @@ |
| 2190 | self._shares_dir = shares_dir |
| 2191 | self._shares_md_dir = shares_md_dir |
| 2192 | self._shared_md_dir = shared_md_dir |
| 2193 | + self._udfs_md_dir = udfs_md_dir |
| 2194 | self._root_dir = root_dir |
| 2195 | self._shares_dir_link = shares_dir_link |
| 2196 | self._version_file = os.path.join(self._data_dir, '.version') |
| 2197 | @@ -878,8 +971,58 @@ |
| 2198 | if not md_version: |
| 2199 | # we don't have a version of the metadata but a .version file? |
| 2200 | # assume it's None and do an upgrade from version 0 |
| 2201 | - md_version = None |
| 2202 | - else: |
| 2203 | + md_version = self._guess_metadata_version() |
| 2204 | + else: |
| 2205 | + md_version = self._guess_metadata_version() |
| 2206 | + self.log.debug('metadata version: %s', md_version) |
| 2207 | + return md_version |
| 2208 | + |
| 2209 | + def _guess_metadata_version(self): |
| 2210 | + """Try to guess the metadata version based on current metadata |
| 2211 | + and layout, fallbacks to md_version = None if can't guess it. |
| 2212 | + and layout, falling back to md_version = None if it can't be guessed. 
| 2213 | + """ |
| 2214 | + #md_version = None |
| 2215 | + if os.path.exists(self._shares_md_dir) \ |
| 2216 | + and os.path.exists(self._shared_md_dir): |
| 2217 | + # we have shares and shared dirs |
| 2218 | + # md_version >= 1 |
| 2219 | + old_root_dir = os.path.join(self._root_dir, 'My Files') |
| 2220 | + old_share_dir = os.path.join(self._root_dir, 'Shared With Me') |
| 2221 | + if os.path.exists(old_share_dir) and os.path.exists(old_root_dir) \ |
| 2222 | + and not os.path.islink(old_share_dir): |
| 2223 | + # md >= 1 and <= 3 |
| 2224 | + # we have a My Files dir, 'Shared With Me' isn't a |
| 2225 | + # symlink and ~/.local/share/ubuntuone/shares doesn't |
| 2226 | + # exists. |
| 2227 | + # md_version <= 3, set it to 2 as it will migrate |
| 2228 | + # .conflict to .u1conflict, and we don't need to upgrade |
| 2229 | + # from version 1 any more as the LegacyShareFileShelf |
| 2230 | + # takes care of that. |
| 2231 | + md_version = '2' |
| 2232 | + else: |
| 2233 | + try: |
| 2234 | + target = os.readlink(self._shares_dir_link) |
| 2235 | + except OSError: |
| 2236 | + target = None |
| 2237 | + if os.path.islink(self._shares_dir_link) \ |
| 2238 | + and os.path.normpath(target) == self._shares_dir_link: |
| 2239 | + # broken symlink, md_version = 4 |
| 2240 | + md_version = '4' |
| 2241 | + else: |
| 2242 | + # md_version >= 5 |
| 2243 | + shelf = LegacyShareFileShelf(self._shares_md_dir) |
| 2244 | + # check a pickled value to check if it's in version |
| 2245 | + # 5 or 6 |
| 2246 | + for key in shelf: |
| 2247 | + share = shelf[key] |
| 2248 | + if isinstance(share, _Share): |
| 2249 | + md_version = '5' |
| 2250 | + else: |
| 2251 | + md_version = '6' |
| 2252 | + break |
| 2253 | + else: |
| 2254 | + # this is metadata 'None' |
| 2255 | md_version = None |
| 2256 | return md_version |
| 2257 | |
| 2258 | @@ -902,9 +1045,6 @@ |
| 2259 | if dir != os.path.basename(backup): |
| 2260 | shutil.move(os.path.join(dirname, dir), |
| 2261 | os.path.join(backup, dir)) |
| 2262 | - # add the old module FQN to sys.modules in order to load the metadata |
| 2263 | - sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] = \ |
| 2264 | - sys.modules['ubuntuone.syncdaemon.volume_manager'] |
| 2265 | # regenerate the shelf using the new layout using the backup as src |
| 2266 | old_shelf = LegacyShareFileShelf(backup) |
| 2267 | if not os.path.exists(self._shares_dir): |
| 2268 | @@ -912,9 +1052,7 @@ |
| 2269 | new_shelf = LegacyShareFileShelf(self._shares_md_dir) |
| 2270 | for key in old_shelf.keys(): |
| 2271 | new_shelf[key] = old_shelf[key] |
| 2272 | - # undo the change to sys.modules |
| 2273 | - del sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] |
| 2274 | - # now upgrade to metadata 3 |
| 2275 | + # now upgrade to metadata 2 |
| 2276 | self._upgrade_metadata_2(md_version) |
| 2277 | |
| 2278 | def _upgrade_metadata_1(self, md_version): |
| 2279 | @@ -946,7 +1084,6 @@ |
| 2280 | for names in filenames, dirnames: |
| 2281 | self._upgrade_names(dirpath, names) |
| 2282 | self._upgrade_metadata_3(md_version) |
| 2283 | - self.update_metadata_version() |
| 2284 | |
| 2285 | def _upgrade_names(self, dirpath, names): |
| 2286 | """ |
| 2287 | @@ -997,6 +1134,14 @@ |
| 2288 | old_root_dir = os.path.join(self._root_dir, 'My Files') |
| 2289 | # change permissions |
| 2290 | os.chmod(self._root_dir, 0775) |
| 2291 | + |
| 2292 | + def move(src, dst): |
| 2293 | + """Move a file/dir taking care if it's read-only.""" |
| 2294 | + prev_mode = stat.S_IMODE(os.stat(src).st_mode) |
| 2295 | + os.chmod(src, 0755) |
| 2296 | + shutil.move(src, dst) |
| 2297 | + os.chmod(dst, prev_mode) |
| 2298 | + |
| 2299 | # update the path's in metadata and move the folder |
| 2300 | if os.path.exists(old_share_dir) and not os.path.islink(old_share_dir): |
| 2301 | os.chmod(old_share_dir, 0775) |
| 2302 | @@ -1004,14 +1149,23 @@ |
| 2303 | os.makedirs(os.path.dirname(self._shares_dir)) |
| 2304 | self.log.debug('moving shares dir from: %r to %r', |
| 2305 | old_share_dir, self._shares_dir) |
| 2306 | - shutil.move(old_share_dir, self._shares_dir) |
| 2307 | + for path in os.listdir(old_share_dir): |
| 2308 | + src = os.path.join(old_share_dir, path) |
| 2309 | + dst = os.path.join(self._shares_dir, path) |
| 2310 | + move(src, dst) |
| 2311 | + os.rmdir(old_share_dir) |
| 2312 | + |
| 2313 | # update the shares metadata |
| 2314 | shares = LegacyShareFileShelf(self._shares_md_dir) |
| 2315 | for key in shares.keys(): |
| 2316 | share = shares[key] |
| 2317 | if share.path is not None: |
| 2318 | - share.path = share.path.replace(old_share_dir, |
| 2319 | - self._shares_dir) |
| 2320 | + if share.path == old_root_dir: |
| 2321 | + share.path = share.path.replace(old_root_dir, |
| 2322 | + self._root_dir) |
| 2323 | + else: |
| 2324 | + share.path = share.path.replace(old_share_dir, |
| 2325 | + self._shares_dir) |
| 2326 | shares[key] = share |
| 2327 | |
| 2328 | shared = LegacyShareFileShelf(self._shared_md_dir) |
| 2329 | @@ -1021,7 +1175,7 @@ |
| 2330 | share.path = share.path.replace(old_root_dir, self._root_dir) |
| 2331 | shared[key] = share |
| 2332 | # move the My Files contents, taking care of dir/files with the same |
| 2333 | - # in the new root |
| 2334 | + # name in the new root |
| 2335 | if os.path.exists(old_root_dir): |
| 2336 | self.log.debug('moving My Files contents to the root') |
| 2337 | # make My Files rw |
| 2338 | @@ -1038,10 +1192,11 @@ |
| 2339 | os.remove(old_path) |
| 2340 | else: |
| 2341 | self.log.debug('moving %r to %r', old_path, new_path) |
| 2342 | - shutil.move(old_path, new_path) |
| 2343 | + move(old_path, new_path) |
| 2344 | self.log.debug('removing old root: %r', old_root_dir) |
| 2345 | os.rmdir(old_root_dir) |
| 2346 | |
| 2347 | + # fix broken symlink (md_version 4) |
| 2348 | self._upgrade_metadata_4(md_version) |
| 2349 | |
| 2350 | def _upgrade_metadata_4(self, md_version): |
| 2351 | @@ -1056,9 +1211,61 @@ |
| 2352 | self.log.debug('removing broken shares symlink: %r -> %r', |
| 2353 | self._shares_dir_link, target) |
| 2354 | os.remove(self._shares_dir_link) |
| 2355 | + self._upgrade_metadata_5(md_version) |
| 2356 | |
| 2357 | + def _upgrade_metadata_5(self, md_version): |
| 2358 | + """ |
| 2359 | + Upgrade to version 6 (plain dict storage) |
| 2360 | + """ |
| 2361 | + self.log.debug('upgrading from metadata 5') |
| 2362 | + # upgrade shares |
| 2363 | + old_shares = LegacyShareFileShelf(self._shares_md_dir) |
| 2364 | + shares = VMFileShelf(self._shares_md_dir) |
| 2365 | + for key in old_shares.keys(): |
| 2366 | + share = old_shares[key] |
| 2367 | + shares[key] = self._upgrade_share_to_volume(share) |
| 2368 | + # upgrade shared folders |
| 2369 | + old_shared = LegacyShareFileShelf(self._shared_md_dir) |
| 2370 | + shared = VMFileShelf(self._shared_md_dir) |
| 2371 | + for key in shared.keys(): |
| 2372 | + share = old_shared[key] |
| 2373 | + shared[key] = self._upgrade_share_to_volume(share, shared=True) |
| 2374 | + # upgrade the udfs |
| 2375 | + old_udfs = LegacyShareFileShelf(self._udfs_md_dir) |
| 2376 | + udfs = VMFileShelf(self._udfs_md_dir) |
| 2377 | + for key in old_udfs.keys(): |
| 2378 | + udf = old_udfs[key] |
| 2379 | + udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path, |
| 2380 | + udf.path, udf.subscribed) |
| 2381 | self.update_metadata_version() |
| 2382 | |
| 2383 | + def _upgrade_share_to_volume(self, share, shared=False): |
| 2384 | + """Upgrade from _Share to new Volume hierarchy.""" |
| 2385 | + def upgrade_share_dict(share): |
| 2386 | + """Upgrade share __dict__ to be compatible with the |
| 2387 | + new Share.__init__. |
| 2388 | + |
| 2389 | + """ |
| 2390 | + if 'subtree' in share.__dict__: |
| 2391 | + share.node_id = share.__dict__.pop('subtree') |
| 2392 | + if 'id' in share.__dict__: |
| 2393 | + share.volume_id = share.__dict__.pop('id') |
| 2394 | + if 'free_bytes' in share.__dict__: |
| 2395 | + free_bytes = share.__dict__.pop('free_bytes') |
| 2396 | + else: |
| 2397 | + free_bytes = None |
| 2398 | + return share |
| 2399 | + # handle the root special case |
| 2400 | + if share.path == self._root_dir or share.id == '': |
| 2401 | + r = Root(share.id, share.subtree, share.path) |
| 2402 | + return r |
| 2403 | + else: |
| 2404 | + share = upgrade_share_dict(share) |
| 2405 | + if shared: |
| 2406 | + return Shared(**share.__dict__) |
| 2407 | + else: |
| 2408 | + return Share(**share.__dict__) |
| 2409 | + |
| 2410 | def update_metadata_version(self): |
| 2411 | """write the version of the metadata""" |
| 2412 | if not os.path.exists(os.path.dirname(self._version_file)): |
| 2413 | @@ -1082,8 +1289,13 @@ |
| 2414 | class VMFileShelf(file_shelf.FileShelf): |
| 2415 | """ Custom file shelf that allow request.ROOT as key, it's replaced |
| 2416 | by the string: root_node_id. |
| 2417 | + |
| 2418 | """ |
| 2419 | |
| 2420 | + TYPE = 'type' |
| 2421 | + classes = dict((sub.__name__, sub) for sub in \ |
| 2422 | + Volume.__subclasses__() + Share.__subclasses__()) |
| 2423 | + |
| 2424 | def __init__(self, *args, **kwargs): |
| 2425 | """ Create the instance. """ |
| 2426 | super(VMFileShelf, self).__init__(*args, **kwargs) |
| 2427 | @@ -1103,6 +1315,22 @@ |
| 2428 | else: |
| 2429 | yield key |
| 2430 | |
| 2431 | + def _unpickle(self, fd): |
| 2432 | + """Unpickle a dict and build the class instance specified in |
| 2433 | + value['type']. |
| 2434 | + """ |
| 2435 | + value = cPickle.load(fd) |
| 2436 | + class_name = value[self.TYPE] |
| 2437 | + clazz = self.classes[class_name] |
| 2438 | + obj = clazz.__new__(clazz) |
| 2439 | + obj.__dict__.update(value) |
| 2440 | + return obj |
| 2441 | + |
| 2442 | + def _pickle(self, value, fd, protocol): |
| 2443 | + """Pickle value in fd using protocol.""" |
| 2444 | + cPickle.dump(value.__dict__, fd, protocol=protocol) |
| 2445 | + |
| 2446 | + |
| 2447 | class LegacyShareFileShelf(VMFileShelf): |
| 2448 | """A FileShelf capable of replacing pickled classes |
| 2449 | with a different class. |
| 2450 | @@ -1112,11 +1340,12 @@ |
| 2451 | """ |
| 2452 | |
| 2453 | upgrade_map = { |
| 2454 | + ('ubuntuone.syncdaemon.volume_manager', 'UDF'):_UDF, |
| 2455 | + ('ubuntuone.syncdaemon.volume_manager', 'Share'):_Share, |
| 2456 | ('canonical.ubuntuone.storage.syncdaemon.volume_manager', |
| 2457 | - 'Share'):Share |
| 2458 | + 'Share'):_Share |
| 2459 | } |
| 2460 | |
| 2461 | - |
| 2462 | def _find_global(self, module, name): |
| 2463 | """Returns the class object for (module, name) or None.""" |
| 2464 | # handle our 'migration types' |


This branch is the last of the VM refactor series:
 - implement the new Volume hierarchy in VolumeManager (see the sketch below)
 - implement VMFileShelf to pickle VM metadata as plain dicts
 - add metadata migration to the new version
 - refactor the metadata migration tests:
- fix a few bugs found (thanks to better tests) in previous metadata migration functions
- update tools.py and dbus_interface.py
- update tools, dbus and vm tests
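
For reviewers who want the shape of the change without paging through the whole diff, below is a minimal, self-contained sketch of the new Volume hierarchy and of the "pickle the __dict__, not the instance" idea behind VMFileShelf. Class names, attributes and the 'type' marker follow ubuntuone/syncdaemon/volume_manager.py in the diff above; the constructors are trimmed and the FileShelf plumbing is replaced by two plain helper functions, so treat this as an illustration of the approach, not the actual implementation.

```python
# Simplified sketch (not the real module): the new Volume hierarchy and
# the dict-based pickling that VMFileShelf uses in this branch.
import cPickle
import os
from StringIO import StringIO


class Volume(object):
    """Base class for everything the VolumeManager tracks."""

    def __init__(self, volume_id, node_id):
        self.volume_id = volume_id
        self.node_id = node_id

    @property
    def id(self):
        # kept so older code that still says volume.id keeps working
        return self.volume_id

    def can_write(self):
        raise NotImplementedError('Subclass responsibility')


class Share(Volume):
    """A folder somebody else shared to this user."""

    def __init__(self, volume_id=None, node_id=None, path=None, name=None,
                 accepted=False, access_level='View'):
        super(Share, self).__init__(volume_id, node_id)
        self.__dict__['type'] = 'Share'  # marker used when unpickling
        self.path = None if path is None else os.path.normpath(path)
        self.name = name
        self.accepted = accepted
        self.access_level = access_level

    def can_write(self):
        return self.access_level == 'Modify'


class Shared(Share):
    """A folder this user shares to somebody else."""

    def __init__(self, *args, **kwargs):
        super(Shared, self).__init__(*args, **kwargs)
        self.__dict__['type'] = 'Shared'


class Root(Volume):
    """The root volume: the Ubuntu One directory itself."""

    def __init__(self, volume_id=None, node_id=None, path=None):
        super(Root, self).__init__(volume_id, node_id)
        self.__dict__['type'] = 'Root'
        self.path = path

    def can_write(self):
        return True


class UDF(Volume):
    """A User Defined Folder; it is active only while subscribed."""

    def __init__(self, volume_id=None, node_id=None,
                 suggested_path=None, path=None, subscribed=True):
        super(UDF, self).__init__(volume_id, node_id)
        self.__dict__['type'] = 'UDF'
        self.suggested_path = suggested_path
        self.path = path
        self.subscribed = subscribed

    def can_write(self):
        return True


# map from the stored 'type' marker back to the class to rebuild
CLASSES = dict((cls.__name__, cls) for cls in (Share, Shared, Root, UDF))


def dump_volume(volume, fd, protocol=2):
    """Store only the instance __dict__ (a plain dict), never the class."""
    cPickle.dump(volume.__dict__, fd, protocol)


def load_volume(fd):
    """Rebuild the right class from the 'type' marker stored in the dict."""
    value = cPickle.load(fd)
    cls = CLASSES[value['type']]
    obj = cls.__new__(cls)
    obj.__dict__.update(value)
    return obj


if __name__ == '__main__':
    buf = StringIO()
    dump_volume(UDF('udf-id', 'node-id', '~/Stuff', '/home/user/Stuff'), buf)
    buf.seek(0)
    print load_volume(buf).path  # /home/user/Stuff
```

Storing only the __dict__ plus a 'type' marker keeps the on-disk metadata decoupled from the class's module path, which is why this branch can drop the sys.modules aliasing hack that the previous migration code needed to unpickle classes from the old canonical.ubuntuone.storage namespace.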