Merge lp:~verterok/ubuntuone-client/vm-refactor into lp:ubuntuone-client

Proposed by Guillermo Gonzalez
Status: Merged
Approved by: Rick McBride
Approved revision: not available
Merged at revision: not available
Proposed branch: lp:~verterok/ubuntuone-client/vm-refactor
Merge into: lp:ubuntuone-client
Prerequisite: lp:~verterok/ubuntuone-client/vm-pre-refactor-api-3
Diff against target: 2464 lines (+1161/-420)
9 files modified
tests/syncdaemon/test_dbus.py (+49/-34)
tests/syncdaemon/test_eq_inotify.py (+108/-21)
tests/syncdaemon/test_eventqueue.py (+3/-1)
tests/syncdaemon/test_tools.py (+2/-2)
tests/syncdaemon/test_vm.py (+562/-218)
ubuntuone/syncdaemon/dbus_interface.py (+6/-2)
ubuntuone/syncdaemon/event_queue.py (+115/-55)
ubuntuone/syncdaemon/tools.py (+6/-6)
ubuntuone/syncdaemon/volume_manager.py (+310/-81)
To merge this branch: bzr merge lp:~verterok/ubuntuone-client/vm-refactor
Reviewer Review Type Date Requested Status
Zachery Bir (community) Approve
Facundo Batista (community) Approve
Review via email: mp+18088@code.launchpad.net

Commit message

VolumeManager Volume hierarchy refactor and metadata migration to new version

To post a comment you must log in.
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

This branch is the last of the VM refactor series:
 - implement new Volume hierarchy in VolumeManager
 - implement VMFileShelf to pickle VM metadata as dict
 - add metadata migration to new version
 - refactor md migration tests:
  - fix a few bugs found (thanks to better tests) in previous metadata migration functions
 - update tools.py and dbus_interface.py
 - update tools, dbus and vm tests

328. By Guillermo Gonzalez

merge with trunk

329. By Guillermo Gonzalez

merge lp:~facundo/ubuntuone-client/second-inotify-processor

330. By Guillermo Gonzalez

fix tools.show_folders function to use volume_id instead of id

Revision history for this message
Facundo Batista (facundo) wrote :

===============================================================================
[ERROR]: tests.syncdaemon.test_u1sdtool.U1SDToolTests.test_show_folders

Traceback (most recent call last):
  File "/usr/lib/python2.6/dist-packages/twisted/internet/defer.py", line 749, in _inlineCallbacks
    result = result.throwExceptionIntoGenerator(g)
  File "/usr/lib/python2.6/dist-packages/twisted/python/failure.py", line 338, in throwExceptionIntoGenerator
    return g.throw(self.type, self.value, self.tb)
  File "/home/facundo/canonical/u1-client/review_vm-refactor/tests/syncdaemon/test_u1sdtool.py", line 291, in test_show_folders
    yield d
  File "/usr/lib/python2.6/dist-packages/twisted/internet/defer.py", line 328, in _runCallbacks
    self.result = callback(self.result, *args, **kw)
  File "/home/facundo/canonical/u1-client/review_vm-refactor/tests/syncdaemon/test_u1sdtool.py", line 286, in <lambda>
    d.addCallback(lambda result: show_folders(result, out))
  File "/home/facundo/canonical/u1-client/review_vm-refactor/ubuntuone/syncdaemon/tools.py", line 618, in show_folders
    out.write(msg_template % (folder['id'], folder['subscribed'],
exceptions.KeyError: 'id'
-------------------------------------------------------------------------------

review: Needs Fixing
Revision history for this message
Guillermo Gonzalez (verterok) wrote :

Already pushed and fixed in revno: 330, thanks!

Revision history for this message
Facundo Batista (facundo) wrote :

Ok, that is already fixed, and all the tests went ok!

review: Approve
Revision history for this message
Zachery Bir (urbanape) wrote :

All tests pass. Gonna say go.

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'tests/syncdaemon/test_dbus.py'
--- tests/syncdaemon/test_dbus.py 2010-01-25 22:28:46 +0000
+++ tests/syncdaemon/test_dbus.py 2010-01-26 20:35:29 +0000
@@ -36,7 +36,7 @@
36 DBUS_IFACE_FOLDERS_NAME,36 DBUS_IFACE_FOLDERS_NAME,
37 EventListener,37 EventListener,
38)38)
39from ubuntuone.syncdaemon.volume_manager import Share, UDF39from ubuntuone.syncdaemon.volume_manager import Share, Shared, UDF
40from ubuntuone.syncdaemon.tools import DBusClient40from ubuntuone.syncdaemon.tools import DBusClient
41from ubuntuone.syncdaemon import event_queue, states, main, config41from ubuntuone.syncdaemon import event_queue, states, main, config
42from contrib.testing.testcase import (42from contrib.testing.testcase import (
@@ -500,21 +500,23 @@
500 access_level='Read', accepted=False))500 access_level='Read', accepted=False))
501 client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)501 client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)
502 d = defer.Deferred()502 d = defer.Deferred()
503 def shares_handler(shares):503 def check(shares):
504 """ handle get_shares reply """504 """ handle get_shares reply """
505 self.assertEquals(1, len(shares))505 self.assertEquals(1, len(shares))
506 for share in shares:506 for share in shares:
507 if share['id'] == '':507 if share['volume_id'] == '':
508 self.assertEquals('', str(share['id']))508 self.assertEquals('', str(share['volume_id']))
509 self.assertEquals(self.root_dir, str(share['path']))509 self.assertEquals(self.root_dir, str(share['path']))
510 self.assertEquals('Modify', str(share['access_level']))510 self.assertEquals('Modify', str(share['access_level']))
511 self.assertEquals('False', str(share['accepted']))511 self.assertEquals('False', str(share['accepted']))
512 else:512 else:
513 self.assertEquals('share_id', str(share['id']))513 self.assertEquals('share_id', str(share['volume_id']))
514 self.assertEquals(share_path, str(share['path']))514 self.assertEquals(share_path, str(share['path']))
515 self.assertEquals('Read', str(share['access_level']))515 self.assertEquals('Read', str(share['access_level']))
516 self.assertEquals('False', str(share['accepted']))516 self.assertEquals('False', str(share['accepted']))
517 d.callback(True)517
518 def shares_handler(shares):
519 d.callback(shares)
518520
519 client.call_method('get_shares', reply_handler=shares_handler,521 client.call_method('get_shares', reply_handler=shares_handler,
520 error_handler=self.error_handler)522 error_handler=self.error_handler)
@@ -539,7 +541,7 @@
539 def check(result):541 def check(result):
540 """the async checker"""542 """the async checker"""
541 self.assertEquals('Yes', result['answer'])543 self.assertEquals('Yes', result['answer'])
542 self.assertEquals('share_id', result['share_id'])544 self.assertEquals('share_id', result['volume_id'])
543 self.assertEquals(True, self.main.vm.shares['share_id'].accepted)545 self.assertEquals(True, self.main.vm.shares['share_id'].accepted)
544546
545 d.addCallback(check)547 d.addCallback(check)
@@ -563,7 +565,7 @@
563 def check(result):565 def check(result):
564 """the async checker"""566 """the async checker"""
565 self.assertEquals('No', result['answer'])567 self.assertEquals('No', result['answer'])
566 self.assertEquals('share_id', result['share_id'])568 self.assertEquals('share_id', result['volume_id'])
567 self.assertEquals(False, self.main.vm.shares['share_id'].accepted)569 self.assertEquals(False, self.main.vm.shares['share_id'].accepted)
568570
569 d.addCallback(check)571 d.addCallback(check)
@@ -648,8 +650,8 @@
648 self.assertEquals(1, len(results))650 self.assertEquals(1, len(results))
649 shared = results[0]651 shared = results[0]
650 self.assertEquals(a_dir, str(shared['path']))652 self.assertEquals(a_dir, str(shared['path']))
651 self.assertEquals('node_id', str(shared['subtree']))653 self.assertEquals('node_id', str(shared['node_id']))
652 self.assertEquals('share_id', str(shared['id']))654 self.assertEquals('share_id', str(shared['volume_id']))
653 self.assertEquals('View', str(shared['access_level']))655 self.assertEquals('View', str(shared['access_level']))
654 d.callback(True)656 d.callback(True)
655 client.call_method('get_shared',657 client.call_method('get_shared',
@@ -680,14 +682,15 @@
680 self.assertEquals(1, len(results))682 self.assertEquals(1, len(results))
681 shared = results[0]683 shared = results[0]
682 self.assertEquals('', str(shared['path']))684 self.assertEquals('', str(shared['path']))
683 self.assertEquals('node_id', str(shared['subtree']))685 self.assertEquals('node_id', str(shared['node_id']))
684 self.assertEquals('share_id', str(shared['id']))686 self.assertEquals('share_id', str(shared['volume_id']))
685 self.assertEquals('View', str(shared['access_level']))687 self.assertEquals('View', str(shared['access_level']))
686 d.callback(True)688 d.callback(True)
687 client.call_method('get_shared',689 client.call_method('get_shared',
688 reply_handler=reply_handler,690 reply_handler=reply_handler,
689 error_handler=self.error_handler)691 error_handler=self.error_handler)
690 return d692 return d
693
691 def test_refresh_shares(self):694 def test_refresh_shares(self):
692 """ Just check that refresh_shares method API works. """695 """ Just check that refresh_shares method API works. """
693 client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)696 client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)
@@ -800,7 +803,7 @@
800 a_dir = os.path.join(self.root_dir, u'ñoño'.encode('utf-8'))803 a_dir = os.path.join(self.root_dir, u'ñoño'.encode('utf-8'))
801 self.fs_manager.create(a_dir, "", is_dir=True)804 self.fs_manager.create(a_dir, "", is_dir=True)
802 self.fs_manager.set_node_id(a_dir, "node_id")805 self.fs_manager.set_node_id(a_dir, "node_id")
803 share = Share(path=a_dir, volume_id='shared_id', name=u'ñoño_shared',806 share = Shared(path=a_dir, volume_id='shared_id', name=u'ñoño_shared',
804 access_level='View', other_username=u'test_username',807 access_level='View', other_username=u'test_username',
805 node_id='node_id')808 node_id='node_id')
806 self.main.vm.add_shared(share)809 self.main.vm.add_shared(share)
@@ -812,8 +815,8 @@
812 self.assertEquals(1, len(results))815 self.assertEquals(1, len(results))
813 shared = results[0]816 shared = results[0]
814 self.assertEquals(a_dir, shared['path'].encode('utf-8'))817 self.assertEquals(a_dir, shared['path'].encode('utf-8'))
815 self.assertEquals('node_id', str(shared['subtree']))818 self.assertEquals('node_id', str(shared['node_id']))
816 self.assertEquals('shared_id', str(shared['id']))819 self.assertEquals('shared_id', str(shared['volume_id']))
817 self.assertEquals('View', str(shared['access_level']))820 self.assertEquals('View', str(shared['access_level']))
818821
819 d.addCallback(check)822 d.addCallback(check)
@@ -1072,14 +1075,15 @@
1072 u'visible_name', 'Write')1075 u'visible_name', 'Write')
10731076
1074 d = defer.Deferred()1077 d = defer.Deferred()
1075 def share_handler(share):1078 def check(share):
1076 """ handler for ShareChanged signal. """1079 """ handler for ShareChanged signal. """
1077 self.assertEquals('a_share_id', str(share['id']))1080 self.assertEquals('a_share_id', str(share['volume_id']))
1078 self.assertEquals(share_path, str(share['path']))1081 self.assertEquals(share_path, str(share['path']))
1079 self.assertEquals('Write', str(share['access_level']))1082 self.assertEquals('Write', str(share['access_level']))
1080 self.assertEquals('False', str(share['accepted']))1083 self.assertEquals('', str(share['accepted']))
1081 d.callback(True)1084 d.addCallback(check)
10821085 def share_handler(result):
1086 d.callback(result)
1083 match = self.bus.add_signal_receiver(share_handler,1087 match = self.bus.add_signal_receiver(share_handler,
1084 signal_name='ShareChanged')1088 signal_name='ShareChanged')
1085 self.signal_receivers.add(match)1089 self.signal_receivers.add(match)
@@ -1089,25 +1093,36 @@
1089 def test_share_deleted(self):1093 def test_share_deleted(self):
1090 """ Test the ShareDeleted signal. """1094 """ Test the ShareDeleted signal. """
1091 share_path = os.path.join(self.main.shares_dir, 'share')1095 share_path = os.path.join(self.main.shares_dir, 'share')
1092 self.main.vm.add_share(Share(path=share_path, volume_id='a_share_id',
1093 access_level='Read', accepted=False))
1094 share_holder = NotifyShareHolder.from_params('a_share_id', 'subtree',1096 share_holder = NotifyShareHolder.from_params('a_share_id', 'subtree',
1095 u'fake_share',1097 u'fake_share',
1096 u'test_username',1098 u'test_username',
1097 u'visible_name', 'Read')1099 u'visible_name', 'Read')
10981100
1101 self.main.vm.add_share(Share.from_notify_holder(share_holder, share_path))
1099 d = defer.Deferred()1102 d = defer.Deferred()
1100 def share_handler(share_dict):1103 def share_handler(share_dict):
1101 """ handler for ShareDeletedsignal. """1104 """ handler for ShareDeletedsignal. """
1102 expected_dict = dict(share_id='a_share_id',1105 d.callback(share_dict)
1103 subtree='subtree',1106
1104 share_name=u'fake_share',1107 match = self.bus.add_signal_receiver(share_handler,
1105 from_username=u'test_username',1108 signal_name='ShareDeleted')
1106 from_visible_name=u'visible_name',1109 self.signal_receivers.add(match)
1110
1111 def check(share_dict):
1112 """Check the result."""
1113 expected_dict = dict(volume_id='a_share_id',
1114 node_id='subtree',
1115 name=u'fake_share',
1116 other_username=u'test_username',
1117 other_visible_name=u'visible_name',
1118 free_bytes='',
1119 path=share_path,
1120 accepted='',
1107 access_level='Read')1121 access_level='Read')
1122 expected_dict['type'] = 'Share'
1108 for k, v in share_dict.items():1123 for k, v in share_dict.items():
1109 self.assertEquals(expected_dict[str(k)], str(v))1124 self.assertEquals(expected_dict[str(k)], str(v))
1110 d.callback(True)1125 d.addCallback(check)
11111126
1112 match = self.bus.add_signal_receiver(share_handler,1127 match = self.bus.add_signal_receiver(share_handler,
1113 signal_name='ShareDeleted')1128 signal_name='ShareDeleted')
@@ -1495,7 +1510,7 @@
1495 udf_dict = self.dbus_iface.folders._get_udf_dict(udf)1510 udf_dict = self.dbus_iface.folders._get_udf_dict(udf)
1496 # check the path it's unicode1511 # check the path it's unicode
1497 self.assertEquals(udf_dict['path'], udf.path.decode('utf-8'))1512 self.assertEquals(udf_dict['path'], udf.path.decode('utf-8'))
1498 self.assertEquals(udf_dict['id'], udf.volume_id)1513 self.assertEquals(udf_dict['volume_id'], udf.id)
1499 self.assertEquals(udf_dict['suggested_path'], udf.suggested_path)1514 self.assertEquals(udf_dict['suggested_path'], udf.suggested_path)
1500 self.assertEquals(udf_dict['node_id'], udf.node_id)1515 self.assertEquals(udf_dict['node_id'], udf.node_id)
1501 self.assertFalse(udf_dict['subscribed'])1516 self.assertFalse(udf_dict['subscribed'])
@@ -1684,13 +1699,13 @@
1684 d = defer.Deferred()1699 d = defer.Deferred()
1685 def delete_volume(path):1700 def delete_volume(path):
1686 """Fake delete_volume"""1701 """Fake delete_volume"""
1687 self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=udf.volume_id)1702 self.main.event_q.push("AQ_DELETE_VOLUME_OK", volume_id=udf.id)
1688 self.main.action_q.delete_volume = delete_volume1703 self.main.action_q.delete_volume = delete_volume
1689 def deleted_handler(info):1704 def deleted_handler(info):
1690 """FolderDeleted handler."""1705 """FolderDeleted handler."""
1691 self.assertRaises(KeyError, self.main.fs.get_by_path,1706 self.assertRaises(KeyError, self.main.fs.get_by_path,
1692 info['path'].decode('utf-8'))1707 info['path'].decode('utf-8'))
1693 self.assertRaises(KeyError, self.main.vm.get_volume, info['id'])1708 self.assertRaises(KeyError, self.main.vm.get_volume, info['volume_id'])
1694 d.callback(True)1709 d.callback(True)
1695 match = self.bus.add_signal_receiver(deleted_handler,1710 match = self.bus.add_signal_receiver(deleted_handler,
1696 signal_name='FolderDeleted')1711 signal_name='FolderDeleted')
@@ -1719,7 +1734,7 @@
1719 self.main.action_q.delete_volume = delete_volume1734 self.main.action_q.delete_volume = delete_volume
1720 def deleted_error_handler(info, error):1735 def deleted_error_handler(info, error):
1721 """FolderDeleteError handler"""1736 """FolderDeleteError handler"""
1722 self.assertEquals(info['id'], udf.volume_id)1737 self.assertEquals(info['volume_id'], udf.volume_id)
1723 self.assertEquals(error, "I'm broken")1738 self.assertEquals(error, "I'm broken")
1724 d.callback(True)1739 d.callback(True)
1725 match = self.bus.add_signal_receiver(deleted_error_handler,1740 match = self.bus.add_signal_receiver(deleted_error_handler,
@@ -1821,7 +1836,7 @@
1821 """FolderDeleted handler."""1836 """FolderDeleted handler."""
1822 self.assertRaises(KeyError, self.main.fs.get_by_path,1837 self.assertRaises(KeyError, self.main.fs.get_by_path,
1823 info['path'].decode('utf-8'))1838 info['path'].decode('utf-8'))
1824 self.assertRaises(KeyError, self.main.vm.get_volume, info['id'])1839 self.assertRaises(KeyError, self.main.vm.get_volume, info['volume_id'])
1825 d.callback(True)1840 d.callback(True)
1826 match = self.bus.add_signal_receiver(deleted_handler,1841 match = self.bus.add_signal_receiver(deleted_handler,
1827 signal_name='ShareDeleted')1842 signal_name='ShareDeleted')
@@ -1852,7 +1867,7 @@
1852 self.main.action_q.delete_volume = delete_volume1867 self.main.action_q.delete_volume = delete_volume
1853 def deleted_error_handler(info, error):1868 def deleted_error_handler(info, error):
1854 """FolderDeleteError handler"""1869 """FolderDeleteError handler"""
1855 self.assertEquals(info['id'], share.volume_id)1870 self.assertEquals(info['volume_id'], share.volume_id)
1856 self.assertEquals(error, "I'm broken")1871 self.assertEquals(error, "I'm broken")
1857 d.callback(True)1872 d.callback(True)
1858 match = self.bus.add_signal_receiver(deleted_error_handler,1873 match = self.bus.add_signal_receiver(deleted_error_handler,
18591874
=== modified file 'tests/syncdaemon/test_eq_inotify.py'
--- tests/syncdaemon/test_eq_inotify.py 2010-01-26 19:15:10 +0000
+++ tests/syncdaemon/test_eq_inotify.py 2010-01-26 20:35:29 +0000
@@ -33,31 +33,104 @@
33class WatchTests(BaseEQTestCase):33class WatchTests(BaseEQTestCase):
34 """Test the EQ API to add and remove watchs."""34 """Test the EQ API to add and remove watchs."""
3535
36 def test_add_watch(self):36 def _create_udf(self, path):
37 """Test that watchs can be added."""37 """Create an UDF and returns it and the volume"""
38 os.makedirs(path)
39 udf = volume_manager.UDF("vol_id", "node_id", path, path, True)
40 self.vm.add_udf(udf)
41
42 def test_add_general_watch(self):
43 """Test that general watchs can be added."""
38 # we should have what we asked for44 # we should have what we asked for
39 self.eq.inotify_add_watch(self.root_dir)45 self.eq.inotify_add_watch(self.root_dir)
40 # pylint: disable-msg=W021246
41 self.assertTrue(self.root_dir in self.eq._watchs)47 # check only added dir in watchs
4248 # pylint: disable-msg=W0212
43 # we shouldn't have other stuff49 self.assertTrue(self.root_dir in self.eq._general_watchs)
44 self.assertTrue("not-added-dir" not in self.eq._watchs)50 self.assertTrue("not-added-dir" not in self.eq._general_watchs)
4551
46 def test_rm_watch(self):52 # nothing in the udf ancestors watch
47 """Test that watchs can be removed."""53 self.assertEqual(self.eq._ancestors_watchs, {})
48 # remove what we added54
55 def test_add_watch_on_udf_ancestor(self):
56 """Test that ancestors watchs can be added."""
57 # create the udf and add the watch
58 path_udf = os.path.join(self.home_dir, "path/to/UDF")
59 self._create_udf(path_udf)
60 path_ancestor = os.path.join(self.home_dir, "path")
61 self.eq.inotify_add_watch(path_ancestor)
62
63 # check only added dir in watchs
64 # pylint: disable-msg=W0212
65 self.assertTrue(path_ancestor in self.eq._ancestors_watchs)
66 self.assertTrue("not-added-dir" not in self.eq._ancestors_watchs)
67
68 # nothing in the general watch
69 self.assertEqual(self.eq._general_watchs, {})
70
71 def test_add_watch_on_udf_exact(self):
72 """Test adding the watch exactly on UDF."""
73 # create the udf and add the watch
74 path_udf = os.path.join(self.home_dir, "path/to/UDF")
75 self._create_udf(path_udf)
76 self.eq.inotify_add_watch(path_udf)
77
78 # pylint: disable-msg=W0212
79 self.assertTrue(path_udf in self.eq._general_watchs)
80 self.assertEqual(self.eq._ancestors_watchs, {})
81
82 def test_add_watch_on_udf_child(self):
83 """Test adding the watch inside UDF."""
84 # create the udf and add the watch
85 path_udf = os.path.join(self.home_dir, "path/to/UDF")
86 self._create_udf(path_udf)
87 path_ancestor = os.path.join(self.home_dir, "path/to/UDF/inside")
88 os.mkdir(path_ancestor)
89 self.eq.inotify_add_watch(path_ancestor)
90
91 # pylint: disable-msg=W0212
92 self.assertTrue(path_ancestor in self.eq._general_watchs)
93 self.assertEqual(self.eq._ancestors_watchs, {})
94
95 def test_rm_watch_wrong(self):
96 """Test that general watchs can be removed."""
97 # add two types of watchs
49 self.eq.inotify_add_watch(self.root_dir)98 self.eq.inotify_add_watch(self.root_dir)
50 self.eq.inotify_rm_watch(self.root_dir)99 path_udf = os.path.join(self.home_dir, "path/to/UDF")
51 # pylint: disable-msg=W0212100 self._create_udf(path_udf)
52 self.assertTrue(self.root_dir not in self.eq._watchs)101 path_ancestor = os.path.join(self.home_dir, "path")
102 self.eq.inotify_add_watch(path_ancestor)
53103
54 # remove different stuff104 # remove different stuff
55 self.eq.inotify_add_watch(self.root_dir)
56 self.assertRaises(ValueError,105 self.assertRaises(ValueError,
57 self.eq.inotify_rm_watch, "not-added-dir")106 self.eq.inotify_rm_watch, "not-added-dir")
58107
59 def test_has_watch(self):108 def test_rm_watch_general(self):
60 """Test that a path is watched."""109 """Test that general watchs can be removed."""
110 # remove what we added
111 self.eq.inotify_add_watch(self.root_dir)
112 self.eq.inotify_rm_watch(self.root_dir)
113
114 # pylint: disable-msg=W0212
115 self.assertEqual(self.eq._general_watchs, {})
116 self.assertEqual(self.eq._ancestors_watchs, {})
117
118 def test_rm_watch_ancestor(self):
119 """Test that ancestor watchs can be removed."""
120 # create the udf and add the watch
121 path_udf = os.path.join(self.home_dir, "path/to/UDF")
122 self._create_udf(path_udf)
123 path_ancestor = os.path.join(self.home_dir, "path")
124 self.eq.inotify_add_watch(path_ancestor)
125
126 # remove what we added
127 self.eq.inotify_rm_watch(path_ancestor)
128 # pylint: disable-msg=W0212
129 self.assertEqual(self.eq._general_watchs, {})
130 self.assertEqual(self.eq._ancestors_watchs, {})
131
132 def test_has_watch_general(self):
133 """Test that a general path is watched."""
61 self.assertFalse(self.eq.inotify_has_watch(self.root_dir))134 self.assertFalse(self.eq.inotify_has_watch(self.root_dir))
62135
63 # add136 # add
@@ -68,6 +141,24 @@
68 self.eq.inotify_rm_watch(self.root_dir)141 self.eq.inotify_rm_watch(self.root_dir)
69 self.assertFalse(self.eq.inotify_has_watch(self.root_dir))142 self.assertFalse(self.eq.inotify_has_watch(self.root_dir))
70143
144 def test_has_watch_ancestor(self):
145 """Test that an ancestor path is watched."""
146 path_udf = os.path.join(self.home_dir, "path/to/UDF")
147 self._create_udf(path_udf)
148 path_ancestor = os.path.join(self.home_dir, "path")
149
150 self.assertFalse(self.eq.inotify_has_watch(path_ancestor))
151
152 # add
153 # create the udf and add the watch
154 self.eq.inotify_add_watch(path_ancestor)
155 self.assertTrue(self.eq.inotify_has_watch(path_ancestor))
156
157 # remove
158 self.eq.inotify_rm_watch(path_ancestor)
159 self.assertFalse(self.eq.inotify_has_watch(path_ancestor))
160
161
71class DynamicHitMe(object):162class DynamicHitMe(object):
72 """Helper class to test a sequence of signals."""163 """Helper class to test a sequence of signals."""
73164
@@ -1157,7 +1248,6 @@
11571248
1158 @param msg: A string describing the failure that's included in the1249 @param msg: A string describing the failure that's included in the
1159 exception.1250 exception.
1160
1161 """1251 """
1162 if not first == second:1252 if not first == second:
1163 if msg is None:1253 if msg is None:
@@ -1246,9 +1336,6 @@
1246 suggested_path, path, True)1336 suggested_path, path, True)
1247 other_ancestors = other_udf.ancestors1337 other_ancestors = other_udf.ancestors
12481338
1249 # pylint: disable-msg=W0212
1250 assert not self.eq._processor._is_udf_ancestor(path)
1251
1252 os.makedirs(path)1339 os.makedirs(path)
1253 # every ancestor has a watch already, added by LocalRescan. Copy that.1340 # every ancestor has a watch already, added by LocalRescan. Copy that.
1254 self.eq.inotify_add_watch(other_udf.path)1341 self.eq.inotify_add_watch(other_udf.path)
12551342
=== modified file 'tests/syncdaemon/test_eventqueue.py'
--- tests/syncdaemon/test_eventqueue.py 2009-11-20 22:00:25 +0000
+++ tests/syncdaemon/test_eventqueue.py 2010-01-26 20:35:29 +0000
@@ -39,9 +39,11 @@
39 self.fsmdir = self.mktemp('fsmdir')39 self.fsmdir = self.mktemp('fsmdir')
40 self.partials_dir = self.mktemp('partials_dir')40 self.partials_dir = self.mktemp('partials_dir')
41 self.root_dir = self.mktemp('root_dir')41 self.root_dir = self.mktemp('root_dir')
42 self.home_dir = self.mktemp('home_dir')
43 self.vm = testcase.FakeVolumeManager(self.root_dir)
42 self.fs = filesystem_manager.FileSystemManager(self.fsmdir,44 self.fs = filesystem_manager.FileSystemManager(self.fsmdir,
43 self.partials_dir,45 self.partials_dir,
44 testcase.FakeVolumeManager(self.root_dir))46 self.vm)
45 self.fs.create(path=self.root_dir,47 self.fs.create(path=self.root_dir,
46 share_id='', is_dir=True)48 share_id='', is_dir=True)
47 self.fs.set_by_path(path=self.root_dir,49 self.fs.set_by_path(path=self.root_dir,
4850
=== modified file 'tests/syncdaemon/test_tools.py'
--- tests/syncdaemon/test_tools.py 2010-01-26 19:11:29 +0000
+++ tests/syncdaemon/test_tools.py 2010-01-26 20:35:29 +0000
@@ -182,7 +182,7 @@
182 def check(result):182 def check(result):
183 """do the asserts"""183 """do the asserts"""
184 self.assertEquals('Yes', result['answer'])184 self.assertEquals('Yes', result['answer'])
185 self.assertEquals('share_id', result['share_id'])185 self.assertEquals('share_id', result['volume_id'])
186 self.assertEquals(True, self.main.vm.shares['share_id'].accepted)186 self.assertEquals(True, self.main.vm.shares['share_id'].accepted)
187187
188 d.addCallback(check)188 d.addCallback(check)
@@ -199,7 +199,7 @@
199 def check(result):199 def check(result):
200 """do the asserts"""200 """do the asserts"""
201 self.assertEquals('No', result['answer'])201 self.assertEquals('No', result['answer'])
202 self.assertEquals('share_id', result['share_id'])202 self.assertEquals('share_id', result['volume_id'])
203 self.assertEquals(False, self.main.vm.shares['share_id'].accepted)203 self.assertEquals(False, self.main.vm.shares['share_id'].accepted)
204204
205 d.addCallback(check)205 d.addCallback(check)
206206
=== modified file 'tests/syncdaemon/test_vm.py'
--- tests/syncdaemon/test_vm.py 2010-01-26 19:40:05 +0000
+++ tests/syncdaemon/test_vm.py 2010-01-26 20:35:29 +0000
@@ -21,7 +21,6 @@
21import logging21import logging
22import os22import os
23import uuid23import uuid
24import sys
2524
26from ubuntuone.storageprotocol.client import ListShares, ListVolumes25from ubuntuone.storageprotocol.client import ListShares, ListVolumes
27from ubuntuone.storageprotocol.sharersp import (26from ubuntuone.storageprotocol.sharersp import (
@@ -36,10 +35,16 @@
36)35)
37from ubuntuone.syncdaemon.volume_manager import (36from ubuntuone.syncdaemon.volume_manager import (
38 Share,37 Share,
38 Shared,
39 UDF,
40 Root,
41 _Share,
42 _UDF,
39 allow_writes,43 allow_writes,
40 UDF,
41 VolumeManager,44 VolumeManager,
42 LegacyShareFileShelf,45 LegacyShareFileShelf,
46 MetadataUpgrader,
47 VMFileShelf,
43)48)
44from twisted.internet import defer, reactor49from twisted.internet import defer, reactor
4550
@@ -1373,64 +1378,118 @@
1373 self.assertTrue(isinstance(share.node_id, basestring))1378 self.assertTrue(isinstance(share.node_id, basestring))
13741379
13751380
1376class ShareShelfUpgradeTests(BaseTwistedTestCase):1381class MetadataTestCase(BaseTwistedTestCase):
1377 """ Tests for shares shelf upgrades"""1382 md_version_None = False
1383 main = None
1384 data_dir = None
1385 share_md_dir = None
1386 shared_md_dir = None
1387 partials_dir = None
1388 u1_dir = None
1389 root_dir = None
1390 shares_dir = None
1391 shares_dir_link = None
13781392
1379 def setUp(self):1393 def setUp(self):
1380 """ setup the test """1394 """Create some directories."""
1381 BaseTwistedTestCase.setUp(self)1395 BaseTwistedTestCase.setUp(self)
1382 self.root_dir = self.mktemp('Ubuntu One')1396 self.data_dir = os.path.join(self.tmpdir, 'data_dir')
1383 self.data_dir = self.mktemp('data_dir')1397 self.vm_data_dir = os.path.join(self.tmpdir, 'data_dir', 'vm')
1384 self.partials_dir = self.mktemp('partials_dir')1398 self.partials_dir = self.mktemp('partials')
1385 self.shares_dir = self.mktemp(os.path.join('Ubuntu One',1399 self.u1_dir = os.path.join(self.tmpdir, 'Ubuntu One')
1386 'Shared with Me'))1400 self.version_file = os.path.join(self.vm_data_dir, '.version')
13871401
1388 def tearDown(self):1402 def tearDown(self):
1389 """Cleanup main and remove the temp dir."""1403 """Cleanup all the cruft."""
1390 main = getattr(self, 'main', None)1404 for path in [self.data_dir, self.partials_dir, self.root_dir,
1391 if main:1405 self.shares_dir]:
1392 main.shutdown()1406 if path and os.path.exists(path):
1393 if os.path.exists(self.root_dir):1407 self.rmtree(path)
1394 self.rmtree(self.root_dir)1408 if self.main:
1395 if os.path.exists(self.data_dir):1409 self.main.shutdown()
1396 self.rmtree(self.data_dir)
1397 if os.path.exists(self.shares_dir):
1398 self.rmtree(self.shares_dir)
1399 VolumeManager.METADATA_VERSION = CURRENT_METADATA_VERSION1410 VolumeManager.METADATA_VERSION = CURRENT_METADATA_VERSION
1400 return BaseTwistedTestCase.tearDown(self)1411 BaseTwistedTestCase.tearDown(self)
14011412
1402 def check_version(self):1413 def check_version(self):
1403 """ check if the current version in the .version file is the lastone.1414 """Check if the current version in the .version file is the last one."""
1404 """1415 with open(self.version_file, 'r') as fd:
1405 with open(os.path.join(self.data_dir, 'vm', '.version'), 'r') as fd:
1406 self.assertEquals(CURRENT_METADATA_VERSION, fd.read().strip())1416 self.assertEquals(CURRENT_METADATA_VERSION, fd.read().strip())
14071417
1408 def test_0_to_1(self):1418 def set_md_version(self, md_version):
1409 """ Test the upgrade from the first shelf layout version to v. 1"""1419 """Write md_version to the .version file."""
1410 # ensure a clean data_dir1420 if not os.path.exists(self.vm_data_dir):
1411 self.rmtree(self.data_dir)1421 os.makedirs(self.vm_data_dir)
1412 vm_data_dir = os.path.join(self.data_dir, 'vm')1422 with open(self.version_file, 'w') as fd:
1413 old_shelf = LegacyShareFileShelf(vm_data_dir)1423 fd.write(md_version)
1424
1425
1426class MetadataOldLayoutTests(MetadataTestCase):
1427 """Tests for 'old' layouts and metadata upgrade"""
1428
1429 def setUp(self):
1430 MetadataTestCase.setUp(self)
1431 self.root_dir = os.path.join(self.u1_dir, 'My Files')
1432 self.shares_dir = os.path.join(self.u1_dir, 'Shared With Me')
1433 self.new_root_dir = self.u1_dir
1434 self.new_shares_dir = self.mktemp('shares_dir')
1435
1436 def tearDown(self):
1437 """Cleanup all the cruft."""
1438 for path in [self.u1_dir, self.new_shares_dir]:
1439 if path and os.path.exists(path):
1440 self.rmtree(path)
1441 MetadataTestCase.tearDown(self)
1442
1443 def _build_layout_version_0(self):
1444 """Build the dir structure to mimic md v.0/None."""
1445 self.share_md_dir = os.path.join(self.tmpdir, 'data_dir', 'vm')
1446 os.makedirs(self.share_md_dir)
1447 os.makedirs(self.root_dir)
1448 os.makedirs(self.shares_dir)
1449
1450 def _build_layout_version_1(self):
1451 """Build the dir structure to mimic md v.1"""
1452 self.share_md_dir = os.path.join(self.vm_data_dir, 'shares')
1453 self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared')
1454 os.makedirs(self.share_md_dir)
1455 os.makedirs(self.shared_md_dir)
1456 os.makedirs(self.root_dir)
1457 os.makedirs(self.shares_dir)
1458
1459 def _set_permissions(self):
1460 """Set the RO perms in the root and the shares directory."""
1461 os.chmod(self.shares_dir, 0500)
1462 os.chmod(self.u1_dir, 0500)
1463
1464 def test_upgrade_0(self):
1465 """Test the upgrade from the first shelf layout version."""
1466 self._build_layout_version_0()
1467 old_shelf = LegacyShareFileShelf(self.share_md_dir)
1414 # add the root_uuid key1468 # add the root_uuid key
1415 root_share = Share(path=self.root_dir)1469 root_share = _Share(path=self.root_dir)
1416 root_share.access_level = 'Modify'1470 root_share.access_level = 'Modify'
1417 old_shelf[''] = root_share1471 old_shelf[''] = root_share
1418 for idx in range(1, 10):1472 for idx in range(1, 10):
1419 old_shelf[str(uuid.uuid4())] = \1473 sid = str(uuid.uuid4())
1420 Share(path=os.path.join(self.shares_dir, str(idx)))1474 old_shelf[sid] = _Share(path=os.path.join(self.shares_dir, str(idx)),
1421 # LegacyShareFileShelf.keys returns a generator1475 share_id=sid)
1476 # ShareFileShelf.keys returns a generator
1422 old_keys = [key for key in old_shelf.keys()]1477 old_keys = [key for key in old_shelf.keys()]
1423 self.assertEquals(10, len(old_keys))1478 self.assertEquals(10, len(old_keys))
1479 if self.md_version_None:
1480 self.set_md_version('')
1481 # set the ro permissions
1482 self._set_permissions()
1424 # we want to keep a refernece to main in order to shutdown1483 # we want to keep a refernece to main in order to shutdown
1425 # pylint: disable-msg=W02011484 # pylint: disable-msg=W0201
1426 self.main = FakeMain(self.root_dir, self.shares_dir,1485 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1427 self.data_dir, self.partials_dir)1486 self.data_dir, self.partials_dir)
1428 new_keys = [new_key for new_key in self.main.vm.shares.keys()]1487 new_keys = [new_key for new_key in self.main.vm.shares.keys()]
1429 self.assertEquals(10, len(new_keys))1488 self.assertEquals(10, len(new_keys))
1430 for new_key in new_keys:1489 for new_key in new_keys:
1431 self.assertIn(new_key, old_keys)1490 self.assertIn(new_key, old_keys)
1432 # check the old data is still there (in the backup)1491 # check the old data is still there (in the backup)
1433 backup_shelf = LegacyShareFileShelf(os.path.join(vm_data_dir, '0.bkp'))1492 backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp'))
1434 backup_keys = [key for key in backup_shelf.keys()]1493 backup_keys = [key for key in backup_shelf.keys()]
1435 for old_key in old_keys:1494 for old_key in old_keys:
1436 self.assertIn(old_key, backup_keys)1495 self.assertIn(old_key, backup_keys)
@@ -1438,18 +1497,13 @@
1438 self.assertIn(new_key, backup_keys)1497 self.assertIn(new_key, backup_keys)
1439 self.check_version()1498 self.check_version()
14401499
1441 def test_1_to_2(self):1500 def test_upgrade_1(self):
1442 """ Test the upgrade from v.1 of the metadata to v.2"""1501 """ Test the upgrade from v.1"""
1443 # ensure a clean data_dir1502 self._build_layout_version_1()
1444 self.rmtree(self.data_dir)
1445 vm_data_dir = os.path.join(self.data_dir, 'vm')
1446 vm_shares_dir = os.path.join(vm_data_dir, 'shares')
1447 os.makedirs(vm_data_dir)
1448 # write the .version file with v.11503 # write the .version file with v.1
1449 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:1504 self.set_md_version('1')
1450 fd.write('1')
14511505
1452 share_file = os.path.join(vm_shares_dir,1506 share_file = os.path.join(self.share_md_dir,
1453 '0/6/6/0664f050-9254-45c5-9f31-3482858709e4')1507 '0/6/6/0664f050-9254-45c5-9f31-3482858709e4')
1454 os.makedirs(os.path.dirname(share_file))1508 os.makedirs(os.path.dirname(share_file))
1455 # this is the str of a version 2 pickle1509 # this is the str of a version 2 pickle
@@ -1465,222 +1519,228 @@
1465 with open(share_file, 'w') as fd:1519 with open(share_file, 'w') as fd:
1466 fd.write(share_value)1520 fd.write(share_value)
14671521
1468 # fake the old namespace
1469 sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] = \
1470 sys.modules['ubuntuone.syncdaemon.volume_manager']
1471 # try to load the shelf1522 # try to load the shelf
1472 old_shelf = LegacyShareFileShelf(vm_shares_dir)1523 old_shelf = LegacyShareFileShelf(self.share_md_dir)
1473 share = old_shelf['0664f050-9254-45c5-9f31-3482858709e4']1524 share = old_shelf['0664f050-9254-45c5-9f31-3482858709e4']
1474 self.assertTrue(share is not None)1525 self.assertTrue(share is not None)
1475 del sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager']1526 if self.md_version_None:
1527 self.set_md_version('')
1528
1529 self._set_permissions()
1476 # now use the real VolumeManager1530 # now use the real VolumeManager
1477 # we want to keep a refernece to main in order to shutdown1531 # we want to keep a refernece to main in order to shutdown
1478 # pylint: disable-msg=W02011532 # pylint: disable-msg=W0201
1479 self.main = FakeMain(self.root_dir, self.shares_dir,1533 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1480 self.data_dir, self.partials_dir)1534 self.data_dir, self.partials_dir)
1481 new_keys = [new_key for new_key in self.main.vm.shares.keys()]1535 new_keys = [new_key for new_key in self.main.vm.shares.keys()]
1482 self.assertEquals(2, len(new_keys)) # the fake share plus root1536 self.assertEquals(2, len(new_keys)) # the fake share plus root
1483 for key in ['', share.volume_id]:1537 for key in ['', share.id]:
1484 self.assertIn(key, new_keys)1538 self.assertIn(key, new_keys)
1485 self.check_version()1539 self.check_version()
14861540
1487 def test_2_to_3(self):1541 def test_upgrade_2(self):
1488 """ Test the upgrade from v.2 of the metadata to v.3"""1542 """Test the upgrade from v.2."""
1489 vm_data_dir = os.path.join(self.data_dir, 'vm')1543 self._build_layout_version_1()
1490 os.makedirs(vm_data_dir)1544 self.set_md_version('2')
1491 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:
1492 fd.write('2')
1493 open(self.root_dir + '/foo.conflict', 'w').close()1545 open(self.root_dir + '/foo.conflict', 'w').close()
1494 open(self.root_dir + '/foo.conflict.23', 'w').close()1546 open(self.root_dir + '/foo.conflict.23', 'w').close()
1495 open(self.shares_dir + '/bar.partial', 'w').close()1547 open(self.shares_dir + '/bar.partial', 'w').close()
1496 os.mkdir(self.shares_dir + '/baz/')1548 os.mkdir(self.shares_dir + '/baz/')
1497 open(self.shares_dir + '/baz/baz.conflict', 'w').close()1549 open(self.shares_dir + '/baz/baz.conflict', 'w').close()
1498 os.chmod(self.shares_dir + '/baz/', 0500)1550 os.chmod(self.shares_dir + '/baz/', 0500)
1499 self.main = FakeMain(self.root_dir, self.shares_dir,1551 if self.md_version_None:
1552 self.set_md_version('')
1553 self._set_permissions()
1554 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1500 self.data_dir, self.partials_dir)1555 self.data_dir, self.partials_dir)
1501 self.assertTrue(os.path.exists(self.root_dir + '/foo.u1conflict'))1556 self.assertTrue(os.path.exists(self.new_root_dir + '/foo.u1conflict'))
1502 self.assertTrue(os.path.exists(self.root_dir + '/foo.u1conflict.23'))1557 self.assertTrue(os.path.exists(self.new_root_dir + '/foo.u1conflict.23'))
1503 self.assertTrue(os.path.exists(self.shares_dir + '/.u1partial.bar'))1558 self.assertTrue(os.path.exists(self.new_shares_dir + '/.u1partial.bar'))
1504 self.assertTrue(os.path.exists(self.shares_dir + '/baz/baz.u1conflict'))1559 self.assertTrue(os.path.exists(self.new_shares_dir + '/baz/baz.u1conflict'))
1560 self.check_version()
15051561
1506 def test_2_to_3_more(self):1562 def test_upgrade_2_more(self):
1507 """ Test the upgrade from v.2 of the metadata to v.3 some more"""1563 """Test the upgrade from v.2 some more."""
1508 vm_data_dir = os.path.join(self.data_dir, 'vm')1564 self._build_layout_version_1()
1509 os.makedirs(vm_data_dir)1565 self.set_md_version('2')
1510 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:
1511 fd.write('2')
15121566
1513 expected = []1567 expected = []
15141568
1515 for dirname in self.root_dir, self.shares_dir:1569 for dirname, new_dirname in [(self.root_dir, self.new_root_dir),
1570 (self.shares_dir, self.new_shares_dir)]:
1516 # a plain .conflict...1571 # a plain .conflict...
1517 # ...on a file1572 # ...on a file
1518 open(dirname + '/1a.conflict', 'w').close()1573 open(dirname + '/1a.conflict', 'w').close()
1519 expected.append(dirname + '/1a.u1conflict')1574 expected.append(new_dirname + '/1a.u1conflict')
1520 # ...on an empty directory1575 # ...on an empty directory
1521 os.mkdir(dirname + '/1b.conflict')1576 os.mkdir(dirname + '/1b.conflict')
1522 expected.append(dirname + '/1b.u1conflict')1577 expected.append(new_dirname + '/1b.u1conflict')
1523 # ...on a directory with content1578 # ...on a directory with content
1524 os.mkdir(dirname + '/1c.conflict')1579 os.mkdir(dirname + '/1c.conflict')
1525 os.mkdir(dirname + '/1c.conflict/1c')1580 os.mkdir(dirname + '/1c.conflict/1c')
1526 expected.append(dirname + '/1c.u1conflict/1c')1581 expected.append(new_dirname + '/1c.u1conflict/1c')
1527 # ...in a readonly directory1582 # ...in a readonly directory
1528 os.mkdir(dirname + '/1d')1583 os.mkdir(dirname + '/1d')
1529 os.mkdir(dirname + '/1d/1d.conflict')1584 os.mkdir(dirname + '/1d/1d.conflict')
1530 os.chmod(dirname + '/1d', 0500)1585 os.chmod(dirname + '/1d', 0500)
1531 expected.append(dirname + '/1d/1d.u1conflict')1586 expected.append(new_dirname + '/1d/1d.u1conflict')
1532 # ...in a directory that is also a .conflict1587 # ...in a directory that is also a .conflict
1533 os.mkdir(dirname + '/1e.conflict')1588 os.mkdir(dirname + '/1e.conflict')
1534 os.mkdir(dirname + '/1e.conflict/1e.conflict')1589 os.mkdir(dirname + '/1e.conflict/1e.conflict')
1535 expected.append(dirname + '/1e.u1conflict/1e.u1conflict')1590 expected.append(new_dirname + '/1e.u1conflict/1e.u1conflict')
15361591
1537 # a numbered .conflict...1592 # a numbered .conflict...
1538 # ...on a file1593 # ...on a file
1539 open(dirname + '/2a.conflict.2', 'w').close()1594 open(dirname + '/2a.conflict.2', 'w').close()
1540 expected.append(dirname + '/2a.u1conflict.2')1595 expected.append(new_dirname + '/2a.u1conflict.2')
1541 # ...on an empty directory1596 # ...on an empty directory
1542 os.mkdir(dirname + '/2b.conflict.3')1597 os.mkdir(dirname + '/2b.conflict.3')
1543 expected.append(dirname + '/2b.u1conflict.3')1598 expected.append(new_dirname + '/2b.u1conflict.3')
1544 # ...on a directory with content1599 # ...on a directory with content
1545 os.mkdir(dirname + '/2c.conflict.4')1600 os.mkdir(dirname + '/2c.conflict.4')
1546 os.mkdir(dirname + '/2c.conflict.4/2c')1601 os.mkdir(dirname + '/2c.conflict.4/2c')
1547 expected.append(dirname + '/2c.u1conflict.4/2c')1602 expected.append(new_dirname + '/2c.u1conflict.4/2c')
1548 # ...in a readonly directory1603 # ...in a readonly directory
1549 os.mkdir(dirname + '/2d')1604 os.mkdir(dirname + '/2d')
1550 os.mkdir(dirname + '/2d/2d.conflict.5')1605 os.mkdir(dirname + '/2d/2d.conflict.5')
1551 os.chmod(dirname + '/2d', 0500)1606 os.chmod(dirname + '/2d', 0500)
1552 expected.append(dirname + '/2d/2d.u1conflict.5')1607 expected.append(new_dirname + '/2d/2d.u1conflict.5')
1553 # ...in a directory that is also a .conflict1608 # ...in a directory that is also a .conflict
1554 os.mkdir(dirname + '/2e.conflict')1609 os.mkdir(dirname + '/2e.conflict')
1555 os.mkdir(dirname + '/2e.conflict/2e.conflict.6')1610 os.mkdir(dirname + '/2e.conflict/2e.conflict.6')
1556 expected.append(dirname + '/2e.u1conflict/2e.u1conflict.6')1611 expected.append(new_dirname + '/2e.u1conflict/2e.u1conflict.6')
15571612
1558 # a plain .conflict of which there already exists a .u1conflict...1613 # a plain .conflict of which there already exists a .u1conflict...
1559 # ...on a file1614 # ...on a file
1560 open(dirname + '/3a.conflict', 'w').close()1615 open(dirname + '/3a.conflict', 'w').close()
1561 open(dirname + '/3a.u1conflict', 'w').close()1616 open(dirname + '/3a.u1conflict', 'w').close()
1562 expected.append(dirname + '/3a.u1conflict')1617 expected.append(new_dirname + '/3a.u1conflict')
1563 expected.append(dirname + '/3a.u1conflict.1')1618 expected.append(new_dirname + '/3a.u1conflict.1')
1564 # ...on an empty directory1619 # ...on an empty directory
1565 os.mkdir(dirname + '/3b.conflict')1620 os.mkdir(dirname + '/3b.conflict')
1566 os.mkdir(dirname + '/3b.u1conflict')1621 os.mkdir(dirname + '/3b.u1conflict')
1567 expected.append(dirname + '/3b.u1conflict')1622 expected.append(new_dirname + '/3b.u1conflict')
1568 expected.append(dirname + '/3b.u1conflict.1')1623 expected.append(new_dirname + '/3b.u1conflict.1')
1569 # ...on a directory with content1624 # ...on a directory with content
1570 os.mkdir(dirname + '/3c.conflict')1625 os.mkdir(dirname + '/3c.conflict')
1571 os.mkdir(dirname + '/3c.conflict/3c')1626 os.mkdir(dirname + '/3c.conflict/3c')
1572 os.mkdir(dirname + '/3c.u1conflict')1627 os.mkdir(dirname + '/3c.u1conflict')
1573 os.mkdir(dirname + '/3c.u1conflict/3c2')1628 os.mkdir(dirname + '/3c.u1conflict/3c2')
1574 expected.append(dirname + '/3c.u1conflict.1/3c')1629 expected.append(new_dirname + '/3c.u1conflict.1/3c')
1575 expected.append(dirname + '/3c.u1conflict/3c2')1630 expected.append(new_dirname + '/3c.u1conflict/3c2')
1576 # ...in a readonly directory1631 # ...in a readonly directory
1577 os.mkdir(dirname + '/3d')1632 os.mkdir(dirname + '/3d')
1578 os.mkdir(dirname + '/3d/3d.conflict')1633 os.mkdir(dirname + '/3d/3d.conflict')
1579 os.mkdir(dirname + '/3d/3d.u1conflict')1634 os.mkdir(dirname + '/3d/3d.u1conflict')
1580 os.mkdir(dirname + '/3d/3d.u1conflict/3d')1635 os.mkdir(dirname + '/3d/3d.u1conflict/3d')
1581 os.chmod(dirname + '/3d', 0500)1636 os.chmod(dirname + '/3d', 0500)
1582 expected.append(dirname + '/3d/3d.u1conflict/3d')1637 expected.append(new_dirname + '/3d/3d.u1conflict/3d')
1583 expected.append(dirname + '/3d/3d.u1conflict.1')1638 expected.append(new_dirname + '/3d/3d.u1conflict.1')
1584 # ...in a directory that is also a .conflict1639 # ...in a directory that is also a .conflict
1585 os.mkdir(dirname + '/3e.conflict')1640 os.mkdir(dirname + '/3e.conflict')
1586 os.mkdir(dirname + '/3e.conflict/3e.conflict')1641 os.mkdir(dirname + '/3e.conflict/3e.conflict')
1587 os.mkdir(dirname + '/3e.conflict/3e.u1conflict')1642 os.mkdir(dirname + '/3e.conflict/3e.u1conflict')
1588 os.mkdir(dirname + '/3e.conflict/3e.u1conflict/3e')1643 os.mkdir(dirname + '/3e.conflict/3e.u1conflict/3e')
1589 expected.append(dirname + '/3e.u1conflict/3e.u1conflict/3e')1644 expected.append(new_dirname + '/3e.u1conflict/3e.u1conflict/3e')
1590 expected.append(dirname + '/3e.u1conflict/3e.u1conflict.1')1645 expected.append(new_dirname + '/3e.u1conflict/3e.u1conflict.1')
15911646
1592 # a numbered .conflict of which there already exists a .u1conflict...1647 # a numbered .conflict of which there already exists a .u1conflict...
1593 # ...on a file1648 # ...on a file
1594 open(dirname + '/4a.conflict.1', 'w').close()1649 open(dirname + '/4a.conflict.1', 'w').close()
1595 open(dirname + '/4a.u1conflict.1', 'w').close()1650 open(dirname + '/4a.u1conflict.1', 'w').close()
1596 expected.append(dirname + '/4a.u1conflict.1')1651 expected.append(new_dirname + '/4a.u1conflict.1')
1597 expected.append(dirname + '/4a.u1conflict.2')1652 expected.append(new_dirname + '/4a.u1conflict.2')
1598 # ...on an empty directory1653 # ...on an empty directory
1599 os.mkdir(dirname + '/4b.conflict.2')1654 os.mkdir(dirname + '/4b.conflict.2')
1600 os.mkdir(dirname + '/4b.u1conflict.2')1655 os.mkdir(dirname + '/4b.u1conflict.2')
1601 expected.append(dirname + '/4b.u1conflict.2')1656 expected.append(new_dirname + '/4b.u1conflict.2')
1602 expected.append(dirname + '/4b.u1conflict.3')1657 expected.append(new_dirname + '/4b.u1conflict.3')
1603 # ...on a directory with content1658 # ...on a directory with content
1604 os.mkdir(dirname + '/4c.conflict.3')1659 os.mkdir(dirname + '/4c.conflict.3')
1605 os.mkdir(dirname + '/4c.conflict.3/4c')1660 os.mkdir(dirname + '/4c.conflict.3/4c')
1606 os.mkdir(dirname + '/4c.u1conflict.3')1661 os.mkdir(dirname + '/4c.u1conflict.3')
1607 expected.append(dirname + '/4c.u1conflict.4/4c')1662 expected.append(new_dirname + '/4c.u1conflict.4/4c')
1608 expected.append(dirname + '/4c.u1conflict.3')1663 expected.append(new_dirname + '/4c.u1conflict.3')
1609 # ...in a readonly directory1664 # ...in a readonly directory
1610 os.mkdir(dirname + '/4d')1665 os.mkdir(dirname + '/4d')
1611 os.mkdir(dirname + '/4d/4d.conflict.4')1666 os.mkdir(dirname + '/4d/4d.conflict.4')
1612 os.mkdir(dirname + '/4d/4d.u1conflict.4')1667 os.mkdir(dirname + '/4d/4d.u1conflict.4')
1613 os.chmod(dirname + '/4d', 0500)1668 os.chmod(dirname + '/4d', 0500)
1614 expected.append(dirname + '/4d/4d.u1conflict.4')1669 expected.append(new_dirname + '/4d/4d.u1conflict.4')
1615 expected.append(dirname + '/4d/4d.u1conflict.5')1670 expected.append(new_dirname + '/4d/4d.u1conflict.5')
1616 # ...in a directory that is also a .conflict1671 # ...in a directory that is also a .conflict
1617 os.mkdir(dirname + '/4e.conflict')1672 os.mkdir(dirname + '/4e.conflict')
1618 os.mkdir(dirname + '/4e.conflict/4e.conflict.5')1673 os.mkdir(dirname + '/4e.conflict/4e.conflict.5')
1619 os.mkdir(dirname + '/4e.conflict/4e.u1conflict.5')1674 os.mkdir(dirname + '/4e.conflict/4e.u1conflict.5')
1620 expected.append(dirname + '/4e.u1conflict/4e.u1conflict.5')1675 expected.append(new_dirname + '/4e.u1conflict/4e.u1conflict.5')
1621 expected.append(dirname + '/4e.u1conflict/4e.u1conflict.6')1676 expected.append(new_dirname + '/4e.u1conflict/4e.u1conflict.6')
16221677
1623 # a plain .partial...1678 # a plain .partial...
1624 # ...of a file1679 # ...of a file
1625 open(dirname + '/5a.partial', 'w').close()1680 open(dirname + '/5a.partial', 'w').close()
1626 expected.append(dirname + '/.u1partial.5a')1681 expected.append(new_dirname + '/.u1partial.5a')
1627 # ...of a directory1682 # ...of a directory
1628 os.mkdir(dirname + '/5b')1683 os.mkdir(dirname + '/5b')
1629 open(dirname + '/5b/.partial', 'w').close()1684 open(dirname + '/5b/.partial', 'w').close()
1630 expected.append(dirname + '/5b/.u1partial')1685 expected.append(new_dirname + '/5b/.u1partial')
1631 # ...of a readonly directory1686 # ...of a readonly directory
1632 os.mkdir(dirname + '/5c')1687 os.mkdir(dirname + '/5c')
1633 open(dirname + '/5c/.partial', 'w').close()1688 open(dirname + '/5c/.partial', 'w').close()
1634 os.chmod(dirname + '/5c', 0500)1689 os.chmod(dirname + '/5c', 0500)
1635 expected.append(dirname + '/5c/.u1partial')1690 expected.append(new_dirname + '/5c/.u1partial')
16361691
1637 # a plain .partial of which there already exists a .u1partial...1692 # a plain .partial of which there already exists a .u1partial...
1638 # ...of a file1693 # ...of a file
1639 open(dirname + '/6a.partial', 'w').close()1694 open(dirname + '/6a.partial', 'w').close()
1640 open(dirname + '/.u1partial.6a', 'w').close()1695 open(dirname + '/.u1partial.6a', 'w').close()
1641 expected.append(dirname + '/.u1partial.6a')1696 expected.append(new_dirname + '/.u1partial.6a')
1642 expected.append(dirname + '/.u1partial.6a.1')1697 expected.append(new_dirname + '/.u1partial.6a.1')
1643 # ...of a directory1698 # ...of a directory
1644 os.mkdir(dirname + '/6b')1699 os.mkdir(dirname + '/6b')
1645 open(dirname + '/6b/.partial', 'w').close()1700 open(dirname + '/6b/.partial', 'w').close()
1646 open(dirname + '/6b/.u1partial', 'w').close()1701 open(dirname + '/6b/.u1partial', 'w').close()
1647 expected.append(dirname + '/6b/.u1partial')1702 expected.append(new_dirname + '/6b/.u1partial')
1648 expected.append(dirname + '/6b/.u1partial.1')1703 expected.append(new_dirname + '/6b/.u1partial.1')
1649 # ...of a readonly directory1704 # ...of a readonly directory
1650 os.mkdir(dirname + '/6c')1705 os.mkdir(dirname + '/6c')
1651 open(dirname + '/6c/.partial', 'w').close()1706 open(dirname + '/6c/.partial', 'w').close()
1652 open(dirname + '/6c/.u1partial', 'w').close()1707 open(dirname + '/6c/.u1partial', 'w').close()
1653 os.chmod(dirname + '/6c', 0500)1708 os.chmod(dirname + '/6c', 0500)
1654 expected.append(dirname + '/6c/.u1partial')1709 expected.append(new_dirname + '/6c/.u1partial')
1655 expected.append(dirname + '/6c/.u1partial.1')1710 expected.append(new_dirname + '/6c/.u1partial.1')
16561711
1657 self.main = FakeMain(self.root_dir, self.shares_dir,1712 self._set_permissions()
1713 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1658 self.data_dir, self.partials_dir)1714 self.data_dir, self.partials_dir)
16591715
1660 for path in expected:1716 for path in expected:
1661 self.assertTrue(os.path.exists(path), 'missing ' + path)1717 self.assertTrue(os.path.exists(path), 'missing ' + path)
1718 self.check_version()
16621719
1663 def test_missing_version_file_with_version_non_0(self):1720 def test_missing_version_file_with_version_non_0(self):
1664 """ Test the upgrade from the first shelf layout version to v31721 """Test the upgrade from the first shelf layout version
1665 while the metadata sould be in v3 format1722 while the metadata sould be in v3 or greater format.
1723
1666 """1724 """
1667 # ensure a clean data_dir1725 self._build_layout_version_1()
1668 self.rmtree(self.data_dir)1726 maybe_old_shelf = LegacyShareFileShelf(self.share_md_dir)
1669 vm_data_dir = os.path.join(self.data_dir, 'vm', 'shares')
1670 maybe_old_shelf = LegacyShareFileShelf(vm_data_dir)
1671 # add the root_uuid key1727 # add the root_uuid key
1672 root_share = Share(path=self.root_dir)1728 root_share = _Share(self.root_dir)
1673 root_share.access_level = 'Modify'1729 root_share.access_level = 'Modify'
1674 maybe_old_shelf[''] = root_share1730 maybe_old_shelf[''] = root_share
1675 for idx in range(1, 10):1731 for idx in range(1, 10):
1676 maybe_old_shelf[str(uuid.uuid4())] = \1732 share_id = str(uuid.uuid4())
1677 Share(path=os.path.join(self.shares_dir, str(idx)))1733 maybe_old_shelf[share_id] = \
1734 _Share(share_id=share_id,
1735 path=os.path.join(self.shares_dir, str(idx)))
1678 # ShareFileShelf.keys returns a generator1736 # ShareFileShelf.keys returns a generator
1679 maybe_old_keys = [key for key in maybe_old_shelf.keys()]1737 maybe_old_keys = [key for key in maybe_old_shelf.keys()]
1680 self.assertEquals(10, len(maybe_old_keys))1738 self.assertEquals(10, len(maybe_old_keys))
1739 if self.md_version_None:
1740 self.set_md_version('')
1681 # we want to keep a refernece to main in order to shutdown1741 # we want to keep a refernece to main in order to shutdown
1682 # pylint: disable-msg=W02011742 # pylint: disable-msg=W0201
1683 self.main = FakeMain(self.root_dir, self.shares_dir,1743 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1684 self.data_dir, self.partials_dir)1744 self.data_dir, self.partials_dir)
1685 new_keys = [new_key for new_key in self.main.vm.shares.keys()]1745 new_keys = [new_key for new_key in self.main.vm.shares.keys()]
1686 self.assertEquals(10, len(new_keys))1746 self.assertEquals(10, len(new_keys))
@@ -1689,160 +1749,444 @@
1689 # as we didn't actually upgrade the shelf, just the .version file1749 # as we didn't actually upgrade the shelf, just the .version file
1690 # check the empty 0.bkp1750 # check the empty 0.bkp
1691 # check the old data is still there (in the backup)1751 # check the old data is still there (in the backup)
1692 backup_shelf = LegacyShareFileShelf(os.path.join(vm_data_dir, '0.bkp'))1752 backup_shelf = LegacyShareFileShelf(os.path.join(self.vm_data_dir, '0.bkp'))
1693 backup_keys = [key for key in backup_shelf.keys()]1753 backup_keys = [key for key in backup_shelf.keys()]
1694 self.assertEquals(0, len(backup_keys))1754 self.assertEquals(0, len(backup_keys))
1695 self.check_version()1755 self.check_version()
16961756
1697 def test_3_to_4(self):1757 def test_upgrade_3(self):
1698 """upgrade from version 3 to 4"""1758 """Test upgrade from version 3."""
1699 vm_data_dir = os.path.join(self.data_dir, 'vm')1759 self._build_layout_version_1()
1700 os.makedirs(vm_data_dir)1760 self.set_md_version('3')
1701 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:1761 # create a dir in the root
1702 fd.write('3')1762 os.makedirs(os.path.join(self.root_dir, 'test_dir'))
1703 os.rmdir(self.shares_dir)
1704 # build the old layout
1705 old_root = os.path.join(self.root_dir, 'My Files')
1706 old_shares = os.path.join(self.root_dir, 'Shared With Me')
1707 os.makedirs(os.path.join(old_root, 'test_dir'))
1708 open(os.path.join(old_root, 'test_file'), 'w').close()
1709 # create a file in the root1763 # create a file in the root
1710 open(os.path.join(self.root_dir, 'test_file'), 'w').close()1764 open(os.path.join(self.root_dir, 'test_file'), 'w').close()
1711 share_path = os.path.join(old_shares, 'Bla from Foo')1765 # create a file in the new root
1766 open(os.path.join(self.new_root_dir, 'test_file'), 'w').close()
1767 share_path = os.path.join(self.shares_dir, 'Bla from Foo')
1712 os.makedirs(share_path)1768 os.makedirs(share_path)
1713 os.makedirs(os.path.join(share_path, 'test_dir'))1769 os.makedirs(os.path.join(share_path, 'test_dir'))
1714 open(os.path.join(share_path, 'test_file'), 'w').close()1770 open(os.path.join(share_path, 'test_file'), 'w').close()
1715 # fix permissions1771 # fix permissions
1716 os.chmod(self.root_dir, 0555)1772 self._set_permissions()
1717 os.chmod(old_shares, 0555)1773 if self.md_version_None:
1774 self.set_md_version('')
1718 # migrate the data1775 # migrate the data
1719 self.main = FakeMain(self.root_dir, self.shares_dir,1776 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1720 self.data_dir, self.partials_dir)1777 self.data_dir, self.partials_dir)
1721 self.assertFalse(os.path.exists(old_root))1778 self.assertFalse(os.path.exists(self.root_dir))
1722 self.assertTrue(os.path.exists(old_shares))1779 self.assertTrue(os.path.exists(self.shares_dir))
1723 self.assertTrue(os.path.islink(old_shares))1780 self.assertTrue(os.path.islink(self.shares_dir), self.shares_dir)
1724 self.assertEquals(old_shares, self.main.shares_dir_link)1781 self.assertEquals(self.shares_dir, self.main.shares_dir_link)
1725 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1782 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1726 'test_dir')))1783 'test_dir')))
1727 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1784 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1728 'test_file')))1785 'test_file')))
1729 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1786 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1730 'test_file.u1conflict')))1787 'test_file.u1conflict')))
1731 self.assertTrue(os.path.exists(share_path))1788 self.assertTrue(os.path.exists(share_path))
1732 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir')))1789 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir')))
1733 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file')))1790 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file')))
1791 self.check_version()
17341792
1735 def test_3_to_4_with_symlink_in_myfiles(self):1793 def test_upgrade_3_with_symlink_in_myfiles(self):
1736 """upgrade from version 3 to 4"""1794 """Test upgrade from version 3 with symlink in 'My Files'."""
1737 vm_data_dir = os.path.join(self.data_dir, 'vm')1795 self._build_layout_version_1()
1738 os.makedirs(vm_data_dir)1796 self.set_md_version('3')
1739 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:
1740 fd.write('3')
1741 os.rmdir(self.shares_dir)
1742 # build the old layout1797 # build the old layout
1743 old_root = os.path.join(self.root_dir, 'My Files')1798 os.makedirs(os.path.join(self.root_dir, 'test_dir'))
1744 old_shares = os.path.join(self.root_dir, 'Shared With Me')1799 open(os.path.join(self.root_dir, 'test_file'), 'w').close()
1745 os.makedirs(os.path.join(old_root, 'test_dir'))
1746 open(os.path.join(old_root, 'test_file'), 'w').close()
1747 # create a file in the root1800 # create a file in the root
1748 open(os.path.join(self.root_dir, 'test_file'), 'w').close()1801 open(os.path.join(self.new_root_dir, 'test_file'), 'w').close()
1749 share_path = os.path.join(old_shares, 'Bla from Foo')1802 share_path = os.path.join(self.shares_dir, 'Bla from Foo')
1750 os.makedirs(share_path)1803 os.makedirs(share_path)
1751 os.makedirs(os.path.join(share_path, 'test_dir'))1804 os.makedirs(os.path.join(share_path, 'test_dir'))
1752 open(os.path.join(share_path, 'test_file'), 'w').close()1805 open(os.path.join(share_path, 'test_file'), 'w').close()
1753 # create the Shared with Me symlink in My Files1806 # create the Shared with Me symlink in My Files
1754 os.symlink(old_shares, os.path.join(old_root, 'Shared With Me'))1807 os.symlink(self.shares_dir, os.path.join(self.root_dir,
1808 "Shared With Me"))
1755 # fix permissions1809 # fix permissions
1756 os.chmod(self.root_dir, 0555)1810 self._set_permissions()
1757 os.chmod(old_shares, 0555)1811 if self.md_version_None:
1812 self.set_md_version('')
1758 # migrate the data1813 # migrate the data
1759 self.shares_dir = os.path.join(self.tmpdir, 'shares')1814 self.main = FakeMain(self.new_root_dir, self.new_shares_dir,
1760 self.main = FakeMain(self.root_dir, self.shares_dir,
1761 self.data_dir, self.partials_dir)1815 self.data_dir, self.partials_dir)
1762 self.assertFalse(os.path.exists(old_root))1816 self.assertFalse(os.path.exists(self.root_dir))
1763 self.assertTrue(os.path.exists(old_shares))1817 self.assertTrue(os.path.exists(self.shares_dir))
1764 self.assertTrue(os.path.islink(old_shares))1818 self.assertTrue(os.path.islink(self.shares_dir))
1765 self.assertEquals(old_shares, self.main.shares_dir_link)1819 self.assertEquals(self.shares_dir, self.main.shares_dir_link)
1766 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1820 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1767 'test_dir')))1821 'test_dir')))
1768 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1822 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1769 'test_file')))1823 'test_file')))
1770 self.assertTrue(os.path.exists(os.path.join(self.root_dir,1824 self.assertTrue(os.path.exists(os.path.join(self.new_root_dir,
1771 'test_file.u1conflict')))1825 'test_file.u1conflict')))
1772 self.assertTrue(os.path.exists(share_path))1826 self.assertTrue(os.path.exists(share_path))
1773 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir')))1827 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_dir')))
1774 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file')))1828 self.assertTrue(os.path.exists(os.path.join(share_path, 'test_file')))
1775 self.assertEquals(self.main.shares_dir,1829 self.assertEquals(self.main.shares_dir,
1776 os.readlink(self.main.shares_dir_link))1830 os.readlink(self.main.shares_dir_link))
17771831 self.check_version()
1778 def test_None_to_4(self):1832
1779 """upgrade from version None to 4 (possibly a clean start)"""1833
1780 VolumeManager.METADATA_VERSION = '4'1834class MetadataNewLayoutTests(MetadataTestCase):
1781 vm_data_dir = os.path.join(self.data_dir, 'vm')1835 """Test for 'new' layout and metadata upgrade."""
1782 version_file = os.path.join(vm_data_dir, '.version')1836
1783 if os.path.exists(version_file):1837 def setUp(self):
1784 os.remove(version_file)1838 MetadataTestCase.setUp(self)
1785 os.rmdir(self.shares_dir)1839 self.share_md_dir = os.path.join(self.vm_data_dir, 'shares')
1786 os.rmdir(self.root_dir)1840 self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared')
1841 self.home_dir = os.path.join(self.tmpdir, 'home', 'ubuntuonehacker')
1842 self.u1_dir = os.path.join(self.home_dir, os.path.split(self.u1_dir)[1])
1843 self.root_dir = self.u1_dir
1844 self.shares_dir = os.path.join(self.tmpdir, 'shares')
1845 self.shares_dir_link = os.path.join(self.u1_dir, 'Shared With Me')
1846
1847 def _build_layout_version_4(self):
1848 """Build the directory structure to mimic md v.4/5."""
1849 os.makedirs(self.share_md_dir)
1850 os.makedirs(self.shared_md_dir)
1851 os.makedirs(self.root_dir)
1852 os.makedirs(self.shares_dir)
1853 os.symlink(self.shares_dir, self.shares_dir_link)
1854
1855 def _fix_permissions(self):
1856 """Fix shares dir permissions, making it read-only."""
1857 os.chmod(self.shares_dir, 0500)
1858
1859 def test_upgrade_None_to_last(self):
1860 """Upgrade from version 'None' (possibly a clean start)."""
1787 old_root = os.path.join(self.root_dir, 'My Files')1861 old_root = os.path.join(self.root_dir, 'My Files')
1788 old_shares = os.path.join(self.root_dir, 'Shared With Me')1862 old_shares = os.path.join(self.root_dir, 'Shared With Me')
1789 # start and check that everything is ok1863 # start and check that everything is ok
1790 self.main = FakeMain(self.root_dir, self.shares_dir,1864 self.main = FakeMain(self.root_dir, self.shares_dir,
1791 self.data_dir, self.partials_dir)1865 self.data_dir, self.partials_dir)
1792 self.assertFalse(os.path.exists(old_root))1866 self.assertFalse(os.path.exists(old_root))
1867 self.assertTrue(os.path.exists(self.root_dir))
1793 self.assertTrue(os.path.exists(old_shares))1868 self.assertTrue(os.path.exists(old_shares))
1794 self.assertTrue(os.path.islink(old_shares))1869 self.assertTrue(os.path.islink(old_shares))
1795 self.assertEquals(old_shares, self.main.shares_dir_link)1870 self.assertEquals(old_shares, self.main.shares_dir_link)
1796 if os.path.exists(version_file):1871 self.check_version()
1797 with open(os.path.join(vm_data_dir, '.version'), 'r') as fd:1872
1798 self.assertEquals('4', fd.read())1873 def test_upgrade_None_to_last_phantom_share_path(self):
1799 else:1874 """Upgrade from version 'None' (possibly a clean start) with a root
1800 self.fail('missing .version file')1875 with missing path.
18011876
1802 def test_None_to_4_phantom_share_path(self):1877 """
1803 """upgrade from version None to 4 (possibly a clean start)"""
1804 VolumeManager.METADATA_VERSION = '4'
1805 vm_data_dir = os.path.join(self.data_dir, 'vm')
1806 version_file = os.path.join(vm_data_dir, '.version')
1807 if os.path.exists(version_file):
1808 os.remove(version_file)
1809 os.rmdir(self.shares_dir)
1810 os.rmdir(self.root_dir)
1811 old_root = os.path.join(self.root_dir, 'My Files')1878 old_root = os.path.join(self.root_dir, 'My Files')
1812 old_shares = os.path.join(self.root_dir, 'Shared With Me')1879 old_shares = os.path.join(self.root_dir, 'Shared With Me')
1813 # start and check that everything is ok
1814 self.main = FakeMain(self.root_dir, self.shares_dir,1880 self.main = FakeMain(self.root_dir, self.shares_dir,
1815 self.data_dir, self.partials_dir)1881 self.data_dir, self.partials_dir)
1816 root = self.main.vm.shares['']1882 self.main.shutdown()
1883 self.rmtree(self.vm_data_dir)
1884 os.makedirs(self.vm_data_dir)
1885 self.set_md_version('')
1886 shares = LegacyShareFileShelf(self.share_md_dir)
1887 root_share = _Share(self.root_dir)
1888 root_share.access_level = 'Modify'
1817 # set None to the share path1889 # set None to the share path
1818 root.path = None1890 root_share.path = None
1819 self.main.vm.shares['test'] = root1891 shares[''] = root_share
1820 if os.path.exists(version_file):1892
1821 os.remove(version_file)1893 if self.md_version_None:
1822 self.main.shutdown()1894 self.set_md_version('')
1823 # check that it's all OK1895 # check that it's all OK
1824 self.main = FakeMain(self.root_dir, self.shares_dir,1896 self.main = FakeMain(self.root_dir, self.shares_dir,
1825 self.data_dir, self.partials_dir)1897 self.data_dir, self.partials_dir)
1826 self.assertFalse(os.path.exists(old_root))1898 self.assertFalse(os.path.exists(old_root))
1827 self.assertTrue(os.path.exists(old_shares))1899 self.assertTrue(os.path.exists(self.root_dir))
1900 self.assertTrue(os.path.exists(self.shares_dir))
1828 self.assertTrue(os.path.islink(old_shares))1901 self.assertTrue(os.path.islink(old_shares))
1829 self.assertEquals(old_shares, self.main.shares_dir_link)1902 self.assertEquals(old_shares, self.main.shares_dir_link)
1830 if os.path.exists(version_file):1903 self.check_version()
1831 with open(os.path.join(vm_data_dir, '.version'), 'r') as fd:1904
1832 self.assertEquals('4', fd.read())1905 def test_upgrade_4(self):
1833 else:1906 """Test migration from 4 to 5 (broken symlink in the root)."""
1834 self.fail('missing .version file')1907 self._build_layout_version_4()
18351908 self.set_md_version('4')
1836 def test_4_to_5(self):1909 # break the symlink
1837 """test migration from 4 to 5 (broken symlink in the root)"""1910 if os.path.exists(self.shares_dir_link):
1838 vm_data_dir = os.path.join(self.data_dir, 'vm')1911 os.unlink(self.shares_dir_link)
1839 os.makedirs(vm_data_dir)1912 os.symlink(self.shares_dir_link, self.shares_dir_link)
1840 with open(os.path.join(vm_data_dir, '.version'), 'w') as fd:1913
1841 fd.write('4')1914 if self.md_version_None:
1842 # build the new layout with a broken symlink1915 self.set_md_version('')
1843 shares_link = os.path.join(self.root_dir, 'Shared With Me')
1844 os.symlink(shares_link, shares_link)
1845 self.main = FakeMain(self.root_dir, self.shares_dir,1916 self.main = FakeMain(self.root_dir, self.shares_dir,
1846 self.data_dir, self.partials_dir)1917 self.data_dir, self.partials_dir)
1847 self.assertEquals(self.main.shares_dir,1918 self.assertEquals(self.main.shares_dir,
1848 os.readlink(self.main.shares_dir_link))1919 os.readlink(self.main.shares_dir_link))
1920 self.check_version()
1921
1922 def test_upgrade_5(self):
1923 """Test the migration from version 5."""
1924 # build a fake version 5 state
1925 self._build_layout_version_4()
1926 self.set_md_version('5')
1927 # create some old shares and shared metadata
1928 legacy_shares = LegacyShareFileShelf(self.share_md_dir)
1929 root_share = _Share(path=self.root_dir, share_id='',
1930 access_level='Modify')
1931 legacy_shares[''] = root_share
1932 for idx, name in enumerate(['share'] * 1000):
1933 sid = str(uuid.uuid4())
1934 share_name = name + '_' + str(idx)
1935 share = _Share(path=os.path.join(self.shares_dir, share_name),
1936 share_id=sid, name=share_name,
1937 node_id=str(uuid.uuid4()),
1938 other_username='username'+str(idx),
1939 other_visible_name='visible name ' + str(idx))
1940 if idx % 2:
1941 share.access_level = 'Modify'
1942 else:
1943 share.access_level = 'View'
1944 legacy_shares[sid] = share
1945
1946 # create shared shares
1947 legacy_shared = LegacyShareFileShelf(self.shared_md_dir)
1948 for idx, name in enumerate(['dir'] * 5):
1949 sid = str(uuid.uuid4())
1950 share_name = name + '_' + str(idx)
1951 share = _Share(path=os.path.join(self.root_dir, share_name),
1952 share_id=sid, node_id=str(uuid.uuid4()),
1953 name=share_name, other_username='hola',
1954 other_visible_name='hola')
1955 if idx % 2:
1956 share.access_level = 'Modify'
1957 else:
1958 share.access_level = 'View'
1959 legacy_shared[sid] = share
1960
1961 # keep a copy of the current shares and shared metadata to check
1962 # the upgrade went ok
1963 legacy_shares = dict(legacy_shares.items())
1964 legacy_shared = dict(legacy_shared.items())
1965
1966 if self.md_version_None:
1967 self.set_md_version('')
1968 # upgrade it!
1969 self.main = FakeMain(self.root_dir, self.shares_dir,
1970 self.data_dir, self.partials_dir)
1971 vm = self.main.vm
1972 def compare_share(share, old_share):
1973 """Compare two shares, new and old"""
1974 self.assertEquals(share.volume_id, old_share.id)
1975 self.assertEquals(share.path, old_share.path)
1976 self.assertEquals(share.node_id, old_share.subtree)
1977 if not isinstance(share, Root):
1978 self.assertEquals(share.name, old_share.name)
1979 self.assertEquals(share.other_username, old_share.other_username)
1980 self.assertEquals(share.other_visible_name, old_share.other_visible_name)
1981 self.assertEquals(share.access_level, old_share.access_level)
1982
1983 for sid in vm.shares:
1984 old_share = legacy_shares[sid]
1985 share = vm.shares[sid]
1986 self.assertTrue(isinstance(share, Share) or isinstance(share, Root))
1987 compare_share(share, old_share)
1988
1989 for sid in vm.shared:
1990 old_share = legacy_shared[sid]
1991 share = vm.shared[sid]
1992 self.assertTrue(isinstance(share, Shared))
1993 compare_share(share, old_share)
1994
1995 def test_upgrade_5_with_udfs(self):
1996 """Test the migration from version 5 with old UDFs."""
1997 # build a fake version 5 state
1998 self._build_layout_version_4()
1999 self.set_md_version('5')
2000 self.udfs_md_dir = os.path.join(self.vm_data_dir, 'udfs')
2001 # create some old shares and shared metadata
2002 legacy_shares = LegacyShareFileShelf(self.share_md_dir)
2003 root_share = _Share(path=self.root_dir, share_id='',
2004 access_level='Modify')
2005 legacy_shares[''] = root_share
2006 for idx, name in enumerate(['share'] * 1000):
2007 sid = str(uuid.uuid4())
2008 share_name = name + '_' + str(idx)
2009 share = _Share(path=os.path.join(self.shares_dir, share_name),
2010 share_id=sid, name=share_name,
2011 node_id=str(uuid.uuid4()),
2012 other_username='username'+str(idx),
2013 other_visible_name='visible name ' + str(idx))
2014 if idx % 2:
2015 share.access_level = 'Modify'
2016 else:
2017 share.access_level = 'View'
2018 legacy_shares[sid] = share
2019
2020 # create shared shares
2021 legacy_shared = LegacyShareFileShelf(self.shared_md_dir)
2022 for idx, name in enumerate(['dir'] * 5):
2023 sid = str(uuid.uuid4())
2024 share_name = name + '_' + str(idx)
2025 share = _Share(path=os.path.join(self.root_dir, share_name),
2026 share_id=sid, node_id=str(uuid.uuid4()),
2027 name=share_name, other_username='hola',
2028 other_visible_name='hola')
2029 if idx % 2:
2030 share.access_level = 'Modify'
2031 else:
2032 share.access_level = 'View'
2033 legacy_shared[sid] = share
2034
2035 # create some udfs
2036 legacy_udfs = LegacyShareFileShelf(self.udfs_md_dir)
2037 for idx, name in enumerate(['dir'] * 5):
2038 udf_id = str(uuid.uuid4())
2039 udf_name = name + '_' + str(idx)
2040 udf = _UDF(udf_id, str(uuid.uuid4()), '~/' + udf_name,
2041 os.path.join(self.home_dir, udf_name))
2042 if idx % 2:
2043 udf.subscribed = True
2044 else:
2045 udf.subscribed = False
2046 legacy_udfs[sid] = udf
2047
2048 # keep a copy of the current shares and shared metadata to check
2049 # the upgrade went ok
2050 legacy_shares = dict(legacy_shares.items())
2051 legacy_shared = dict(legacy_shared.items())
2052 legacy_udfs = dict(legacy_udfs.items())
2053
2054 if self.md_version_None:
2055 self.set_md_version('')
2056 # upgrade it!
2057 self.main = FakeMain(self.root_dir, self.shares_dir,
2058 self.data_dir, self.partials_dir)
2059 vm = self.main.vm
2060 def compare_share(share, old_share):
2061 """Compare two shares, new and old"""
2062 self.assertEquals(share.volume_id, old_share.id)
2063 self.assertEquals(share.path, old_share.path)
2064 self.assertEquals(share.node_id, old_share.subtree)
2065 if not isinstance(share, Root):
2066 self.assertEquals(share.name, old_share.name)
2067 self.assertEquals(share.other_username, old_share.other_username)
2068 self.assertEquals(share.other_visible_name, old_share.other_visible_name)
2069 self.assertEquals(share.access_level, old_share.access_level)
2070
2071 for sid in vm.shares:
2072 old_share = legacy_shares[sid]
2073 share = vm.shares[sid]
2074 self.assertTrue(isinstance(share, Share) or isinstance(share, Root))
2075 compare_share(share, old_share)
2076
2077 for sid in vm.shared:
2078 old_share = legacy_shared[sid]
2079 share = vm.shared[sid]
2080 self.assertTrue(isinstance(share, Shared))
2081 compare_share(share, old_share)
2082
2083 for udf_id in vm.udfs:
2084 old_udf = legacy_udfs[udf_id]
2085 udf = vm.udfs[udf_id]
2086 self.assertTrue(isinstance(udf, UDF))
2087 self.assertEquals(udf.volume_id, old_udf.id)
2088 self.assertEquals(udf.path, old_udf.path)
2089 self.assertEquals(udf.node_id, old_udf.node_id)
2090 self.assertEquals(udf.suggested_path, old_udf.suggested_path)
2091 self.assertEquals(udf.subscribed, old_udf.subscribed)
2092
2093
2094class BrokenOldMDVersionUpgradeTests(MetadataOldLayoutTests):
2095 """MetadataOldLayoutTests with broken .version file."""
2096 md_version_None = True
2097
2098
2099class BrokenNewMDVersionUpgradeTests(MetadataNewLayoutTests):
2100 """MetadataNewLayoutTests with broken .version file."""
2101 md_version_None = True
2102
2103
2104class MetadataUpgraderTests(MetadataTestCase):
2105 """MetadataUpgrader tests."""
2106
2107 def setUp(self):
2108 """Create the MetadataUpgrader instance."""
2109 MetadataTestCase.setUp(self)
2110 self.share_md_dir = os.path.join(self.vm_data_dir, 'shares')
2111 self.shared_md_dir = os.path.join(self.vm_data_dir, 'shared')
2112 self.udfs_md_dir = os.path.join(self.vm_data_dir, 'udfs')
2113 self.home_dir = os.path.join(self.tmpdir, 'home', 'ubuntuonehacker')
2114 self.u1_dir = os.path.join(self.home_dir, os.path.split(self.u1_dir)[1])
2115 self.root_dir = self.u1_dir
2116 self.shares_dir = os.path.join(self.tmpdir, 'shares')
2117 self.shares_dir_link = os.path.join(self.u1_dir, 'Shared With Me')
2118 for path in [self.share_md_dir, self.shared_md_dir,
2119 self.root_dir, self.shares_dir]:
2120 if not os.path.exists(path):
2121 os.makedirs(path)
2122 os.symlink(self.shares_dir, self.shares_dir_link)
2123 self.old_get_md_version = MetadataUpgrader._get_md_version
2124 MetadataUpgrader._get_md_version = lambda _: None
2125 self.md_upgrader = MetadataUpgrader(self.vm_data_dir, self.share_md_dir,
2126 self.shared_md_dir,
2127 self.udfs_md_dir, self.root_dir,
2128 self.shares_dir,
2129 self.shares_dir_link)
2130 def tearDown(self):
2131 """Restorre _get_md_version"""
2132 MetadataUpgrader._get_md_version = self.old_get_md_version
2133 MetadataTestCase.tearDown(self)
2134
2135 def test_guess_metadata_version_None(self):
2136 """Test _guess_metadata_version method for pre-version."""
2137 # fake a version None layout
2138 if os.path.exists(self.version_file):
2139 os.unlink(self.version_file)
2140 for path in [self.share_md_dir, self.shared_md_dir,
2141 self.root_dir, self.shares_dir]:
2142 if os.path.exists(path):
2143 self.rmtree(path)
2144 os.makedirs(os.path.join(self.root_dir, 'My Files'))
2145 shares_dir = os.path.join(self.root_dir, 'Shared With Me')
2146 os.makedirs(shares_dir)
2147 os.chmod(self.root_dir, 0500)
2148 os.chmod(shares_dir, 0500)
2149 version = self.md_upgrader._guess_metadata_version()
2150 self.assertEquals(None, version)
2151
2152 def test_guess_metadata_version_1_or_2(self):
2153 """Test _guess_metadata_version method for version 1 or 2."""
2154 # fake a version 1 layout
2155 if os.path.exists(self.version_file):
2156 os.unlink(self.version_file)
2157 self.rmtree(self.root_dir)
2158 os.makedirs(os.path.join(self.root_dir, 'My Files'))
2159 shares_dir = os.path.join(self.root_dir, 'Shared With Me')
2160 os.makedirs(shares_dir)
2161 os.chmod(self.root_dir, 0500)
2162 os.chmod(shares_dir, 0500)
2163 self.rmtree(self.shares_dir)
2164 version = self.md_upgrader._guess_metadata_version()
2165 self.assertIn(version, ['1', '2'])
2166
2167 def test_guess_metadata_version_4(self):
2168 """Test _guess_metadata_version method for version 4."""
2169 # fake a version 4 layout
2170 if os.path.exists(self.version_file):
2171 os.unlink(self.version_file)
2172 os.unlink(self.shares_dir_link)
2173 os.symlink(self.shares_dir_link, self.shares_dir_link)
2174 version = self.md_upgrader._guess_metadata_version()
2175 self.assertEquals(version, '4')
2176
2177 def test_guess_metadata_version_5(self):
2178 """Test _guess_metadata_version method for version 5."""
2179 # fake a version 5 layout and metadata
2180 shelf = LegacyShareFileShelf(self.share_md_dir)
2181 shelf['foobar'] = _Share(path='/foo/bar', share_id='foobar')
2182 version = self.md_upgrader._guess_metadata_version()
2183 self.assertEquals(version, '5')
2184
2185 def test_guess_metadata_version_6(self):
2186 """Test _guess_metadata_version method for version 6."""
2187 # fake a version 6 layout and metadata
2188 shelf = VMFileShelf(self.share_md_dir)
2189 shelf['foobar'] = Share(path='/foo/bar', volume_id='foobar')
2190 version = self.md_upgrader._guess_metadata_version()
2191 self.assertEquals(version, '6')
2192
18492193
=== modified file 'ubuntuone/syncdaemon/dbus_interface.py'
--- ubuntuone/syncdaemon/dbus_interface.py 2010-01-26 14:32:54 +0000
+++ ubuntuone/syncdaemon/dbus_interface.py 2010-01-26 20:35:29 +0000
@@ -544,7 +544,9 @@
544 def handle_SV_SHARE_CHANGED(self, message, share):544 def handle_SV_SHARE_CHANGED(self, message, share):
545 """ handle SV_SHARE_CHANGED event, emit's ShareChanged signal. """545 """ handle SV_SHARE_CHANGED event, emit's ShareChanged signal. """
546 self.handle_default('SV_SHARE_CHANGED', message, share)546 self.handle_default('SV_SHARE_CHANGED', message, share)
547 self.dbus_iface.shares.emit_share_changed(message, share)547 if message != 'deleted':
548 # deleted shares are handled in VM
549 self.dbus_iface.shares.emit_share_changed(message, share)
548550
549 def handle_SV_FREE_SPACE(self, share_id, free_bytes):551 def handle_SV_FREE_SPACE(self, share_id, free_bytes):
550 """ handle SV_FREE_SPACE event, emit ShareChanged signal. """552 """ handle SV_FREE_SPACE event, emit ShareChanged signal. """
@@ -871,6 +873,8 @@
871 share_dict[unicode(k)] = ''873 share_dict[unicode(k)] = ''
872 elif k == 'path':874 elif k == 'path':
873 share_dict[unicode(k)] = v.decode('utf-8')875 share_dict[unicode(k)] = v.decode('utf-8')
876 elif k == 'accepted':
877 share_dict[unicode(k)] = self.bool_str(v)
874 else:878 else:
875 share_dict[unicode(k)] = unicode(v)879 share_dict[unicode(k)] = unicode(v)
876 return share_dict880 return share_dict
@@ -949,7 +953,7 @@
949953
950 def emit_share_answer_response(self, share_id, answer, error=None):954 def emit_share_answer_response(self, share_id, answer, error=None):
951 """ emits ShareCreated signal """955 """ emits ShareCreated signal """
952 answer_info = dict(share_id=share_id, answer=answer)956 answer_info = dict(volume_id=share_id, answer=answer)
953 if error:957 if error:
954 answer_info['error'] = error958 answer_info['error'] = error
955 self.ShareAnswerResponse(answer_info)959 self.ShareAnswerResponse(answer_info)
956960
=== modified file 'ubuntuone/syncdaemon/event_queue.py'
--- ubuntuone/syncdaemon/event_queue.py 2010-01-26 15:54:29 +0000
+++ ubuntuone/syncdaemon/event_queue.py 2010-01-26 20:35:29 +0000
@@ -159,7 +159,7 @@
159}159}
160160
161# these are the events that will listen from inotify161# these are the events that will listen from inotify
162INOTIFY_EVENTS = (162INOTIFY_EVENTS_GENERAL = (
163 evtcodes.IN_OPEN |163 evtcodes.IN_OPEN |
164 evtcodes.IN_CLOSE_NOWRITE |164 evtcodes.IN_CLOSE_NOWRITE |
165 evtcodes.IN_CLOSE_WRITE |165 evtcodes.IN_CLOSE_WRITE |
@@ -169,6 +169,12 @@
169 evtcodes.IN_MOVED_TO |169 evtcodes.IN_MOVED_TO |
170 evtcodes.IN_MOVE_SELF170 evtcodes.IN_MOVE_SELF
171)171)
172INOTIFY_EVENTS_ANCESTORS = (
173 evtcodes.IN_DELETE |
174 evtcodes.IN_MOVED_FROM |
175 evtcodes.IN_MOVED_TO |
176 evtcodes.IN_MOVE_SELF
177)
172178
173DEFAULT_HANDLER = "handle_default" # receives (event_name, *args, **kwargs)179DEFAULT_HANDLER = "handle_default" # receives (event_name, *args, **kwargs)
174180
@@ -201,14 +207,58 @@
201 return True207 return True
202208
203209
204class _INotifyProcessor(pyinotify.ProcessEvent):210class _AncestorsINotifyProcessor(pyinotify.ProcessEvent):
205 """Helper class that is called from inpotify when an event happens.211 """inotify's processor when an event happens on an UDFs ancestor."""
212 def __init__(self, eq):
213 self.log = logging.getLogger('ubuntuone.SyncDaemon.AncestorsINotProc')
214 self.eq = eq
215
216 def _get_udf(self, path):
217 """Get the udf for a specific path.
218
219 It can return None in case the UDF was deleted in the meantime.
220 """
221 for udf in self.eq.fs.vm.udfs.itervalues():
222 parent = os.path.dirname(udf.path) + os.path.sep
223 if parent.startswith(path + os.path.sep):
224 return udf
225 return None
226
227 def process_IN_MOVE_SELF(self, event):
228 """Don't do anything here.
229
230 We just turned this event on because pyinotify does some
231 path-fixing in its internal processing when this happens.
232 """
233 process_IN_MOVED_TO = process_IN_MOVE_SELF
234
235 def process_IN_MOVED_FROM(self, event):
236 """Getting it out or renaming means unsuscribe."""
237 if event.mask & evtcodes.IN_ISDIR:
238 udf = self._get_udf(event.path)
239 if udf is not None:
240 self.log.info("Got MOVED_FROM on path %r, unsubscribing "
241 "udf %s", event.path, udf)
242 self.eq.fs.vm.unsubscribe_udf(udf.volume_id)
243
244 def process_IN_DELETE(self, event):
245 """Check to see if the UDF was deleted."""
246 if event.mask & evtcodes.IN_ISDIR:
247 udf = self._get_udf(event.path)
248 if udf is not None and udf.path == event.pathname:
249 self.log.info("Got DELETE on path %r, deleting udf %s",
250 event.path, udf)
251 self.eq.fs.vm.delete_volume(udf.volume_id)
252
253
254class _GeneralINotifyProcessor(pyinotify.ProcessEvent):
255 """inotify's processor when a general event happens.
206256
207 This class also catchs the MOVEs events, and synthetises a new257 This class also catchs the MOVEs events, and synthetises a new
208 FS_(DIR|FILE)_MOVE event when possible.258 FS_(DIR|FILE)_MOVE event when possible.
209 """259 """
210 def __init__(self, eq):260 def __init__(self, eq):
211 self.log = logging.getLogger('ubuntuone.SyncDaemon.INotifyProcessor')261 self.log = logging.getLogger('ubuntuone.SyncDaemon.GeneralINotProc')
212 self.eq = eq262 self.eq = eq
213 self.held_event = None263 self.held_event = None
214 self.timer = None264 self.timer = None
@@ -216,16 +266,6 @@
216 self.frozen_evts = False266 self.frozen_evts = False
217 self._to_mute = MuteFilter()267 self._to_mute = MuteFilter()
218268
219 def _is_udf_ancestor(self, path):
220 """Decide if path is an UDF ancestor or not."""
221 result = None
222 for udf in self.eq.fs.vm.udfs.itervalues():
223 parent = os.path.dirname(udf.path) + os.path.sep
224 if parent.startswith(path + os.path.sep):
225 return udf
226
227 return result
228
229 def add_to_mute_filter(self, event, *paths):269 def add_to_mute_filter(self, event, *paths):
230 """Add an event and path(s) to the mute filter."""270 """Add an event and path(s) to the mute filter."""
231 # all events have one path except the MOVEs271 # all events have one path except the MOVEs
@@ -261,14 +301,12 @@
261301
262 def process_IN_OPEN(self, event):302 def process_IN_OPEN(self, event):
263 """Filter IN_OPEN to make it happen only in files."""303 """Filter IN_OPEN to make it happen only in files."""
264 if not (event.mask & evtcodes.IN_ISDIR) and \304 if not (event.mask & evtcodes.IN_ISDIR):
265 not self._is_udf_ancestor(event.path):
266 self.push_event(event)305 self.push_event(event)
267306
268 def process_IN_CLOSE_NOWRITE(self, event):307 def process_IN_CLOSE_NOWRITE(self, event):
269 """Filter IN_CLOSE_NOWRITE to make it happen only in files."""308 """Filter IN_CLOSE_NOWRITE to make it happen only in files."""
270 if not (event.mask & evtcodes.IN_ISDIR) and \309 if not (event.mask & evtcodes.IN_ISDIR):
271 not self._is_udf_ancestor(event.path):
272 self.push_event(event)310 self.push_event(event)
273311
274 def process_IN_MOVE_SELF(self, event):312 def process_IN_MOVE_SELF(self, event):
@@ -281,11 +319,6 @@
281319
282 def process_IN_MOVED_FROM(self, event):320 def process_IN_MOVED_FROM(self, event):
283 """Capture the MOVED_FROM to maybe syntethize FILE_MOVED."""321 """Capture the MOVED_FROM to maybe syntethize FILE_MOVED."""
284 udf = self._is_udf_ancestor(event.path)
285 if udf is not None:
286 self.eq.fs.vm.unsubscribe_udf(udf.volume_id)
287 return
288
289 if self.held_event is not None:322 if self.held_event is not None:
290 self.release_held_event()323 self.release_held_event()
291324
@@ -317,9 +350,6 @@
317350
318 def process_IN_MOVED_TO(self, event):351 def process_IN_MOVED_TO(self, event):
319 """Capture the MOVED_TO to maybe syntethize FILE_MOVED."""352 """Capture the MOVED_TO to maybe syntethize FILE_MOVED."""
320 if self._is_udf_ancestor(event.path):
321 return
322
323 if self.held_event is not None:353 if self.held_event is not None:
324 if event.cookie == self.held_event.cookie:354 if event.cookie == self.held_event.cookie:
325 try:355 try:
@@ -381,16 +411,6 @@
381411
382 def process_default(self, event):412 def process_default(self, event):
383 """Push the event into the EventQueue."""413 """Push the event into the EventQueue."""
384 udf = self._is_udf_ancestor(event.path)
385 if udf is not None:
386 # if event is the deletion of the UDF per se,
387 # call delete_volume on VolumeManager for that UDF.
388 ename = NAME_TRANSLATIONS.get(event.mask, None)
389 is_dir_delete = ename is not None and ename == 'FS_DIR_DELETE'
390 if udf.path == event.pathname and is_dir_delete:
391 self.eq.fs.vm.delete_volume(udf.volume_id)
392 return
393
394 if self.held_event is not None:414 if self.held_event is not None:
395 self.release_held_event()415 self.release_held_event()
396 self.push_event(event)416 self.push_event(event)
@@ -482,13 +502,23 @@
482502
483 self.log = logging.getLogger('ubuntuone.SyncDaemon.EQ')503 self.log = logging.getLogger('ubuntuone.SyncDaemon.EQ')
484 self.fs = fs504 self.fs = fs
485 # hook inotify505
486 self._inotify_reader = None506 # general inotify
487 self._inotify_wm = wm = pyinotify.WatchManager()507 self._inotify_general_wm = wm = pyinotify.WatchManager()
488 self._processor = _INotifyProcessor(self)508 self._processor = _GeneralINotifyProcessor(self)
489 self._inotify_notifier = pyinotify.Notifier(wm, self._processor)509 self._inotify_notifier_gral = pyinotify.Notifier(wm, self._processor)
490 self._hook_inotify_to_twisted(wm, self._inotify_notifier)510 self._inotify_reader_gral = self._hook_inotify_to_twisted(
491 self._watchs = {}511 wm, self._inotify_notifier_gral)
512 self._general_watchs = {}
513
514 # ancestors inotify
515 self._inotify_ancestors_wm = wm = pyinotify.WatchManager()
516 antr_processor = _AncestorsINotifyProcessor(self)
517 self._inotify_notifier_antr = pyinotify.Notifier(wm, antr_processor)
518 self._inotify_reader_antr = self._hook_inotify_to_twisted(
519 wm, self._inotify_notifier_antr)
520 self._ancestors_watchs = {}
521
492 self.dispatching = False522 self.dispatching = False
493 self.dispatch_queue = Queue()523 self.dispatch_queue = Queue()
494 self.empty_event_queue_callbacks = set()524 self.empty_event_queue_callbacks = set()
@@ -526,34 +556,64 @@
526 notifier.process_events()556 notifier.process_events()
527557
528 reader = MyReader()558 reader = MyReader()
529 self._inotify_reader = reader
530 reactor.addReader(reader)559 reactor.addReader(reader)
560 return reader
531561
532 def shutdown(self):562 def shutdown(self):
533 """Prepares the EQ to be closed."""563 """Prepares the EQ to be closed."""
534 self._inotify_notifier.stop()564 self._inotify_notifier_gral.stop()
535 reactor.removeReader(self._inotify_reader)565 self._inotify_notifier_antr.stop()
566 reactor.removeReader(self._inotify_reader_gral)
567 reactor.removeReader(self._inotify_reader_antr)
536568
537 def inotify_rm_watch(self, dirpath):569 def inotify_rm_watch(self, dirpath):
538 """Remove watch from a dir."""570 """Remove watch from a dir."""
539 try:571 if dirpath in self._general_watchs:
540 wd = self._watchs[dirpath]572 w_dict = self._general_watchs
541 except KeyError:573 w_manager = self._inotify_general_wm
574 elif dirpath in self._ancestors_watchs:
575 w_dict = self._ancestors_watchs
576 w_manager = self._inotify_ancestors_wm
577 else:
542 raise ValueError("The path %r is not watched right now!" % dirpath)578 raise ValueError("The path %r is not watched right now!" % dirpath)
543 result = self._inotify_wm.rm_watch(wd)579
580 wd = w_dict[dirpath]
581 result = w_manager.rm_watch(wd)
544 if not result[wd]:582 if not result[wd]:
545 raise RuntimeError("The path %r couldn't be removed!" % dirpath)583 raise RuntimeError("The path %r couldn't be removed!" % dirpath)
546 del self._watchs[dirpath]584 del w_dict[dirpath]
547585
548 def inotify_add_watch(self, dirpath):586 def inotify_add_watch(self, dirpath):
549 """Add watch to a dir."""587 """Add watch to a dir."""
550 self.log.debug("Adding inotify watch to %r", dirpath)588 # see where to add it
551 result = self._inotify_wm.add_watch(dirpath, INOTIFY_EVENTS)589 if self._is_udf_ancestor(dirpath):
552 self._watchs[dirpath] = result[dirpath]590 w_type = "ancestors"
591 w_manager = self._inotify_ancestors_wm
592 w_dict = self._ancestors_watchs
593 events = INOTIFY_EVENTS_ANCESTORS
594 else:
595 w_type = "general"
596 w_manager = self._inotify_general_wm
597 w_dict = self._general_watchs
598 events = INOTIFY_EVENTS_GENERAL
599
600 # add the watch!
601 self.log.debug("Adding %s inotify watch to %r", w_type, dirpath)
602 result = w_manager.add_watch(dirpath, events)
603 w_dict[dirpath] = result[dirpath]
553604
554 def inotify_has_watch(self, dirpath):605 def inotify_has_watch(self, dirpath):
555 """Check if a dirpath is watched."""606 """Check if a dirpath is watched."""
556 return dirpath in self._watchs607 return (dirpath in self._general_watchs or
608 dirpath in self._ancestors_watchs)
609
610 def _is_udf_ancestor(self, path):
611 """Decide if path is an UDF ancestor or not."""
612 for udf in self.fs.vm.udfs.itervalues():
613 parent = os.path.dirname(udf.path) + os.path.sep
614 if parent.startswith(path + os.path.sep):
615 return True
616 return False
557617
558 def unsubscribe(self, obj):618 def unsubscribe(self, obj):
559 """Removes the callback object from the listener queue.619 """Removes the callback object from the listener queue.
560620
=== modified file 'ubuntuone/syncdaemon/tools.py'
--- ubuntuone/syncdaemon/tools.py 2010-01-25 14:58:45 +0000
+++ ubuntuone/syncdaemon/tools.py 2010-01-26 20:35:29 +0000
@@ -298,7 +298,7 @@
298 self.log.debug('accept_share(%s)', share_id)298 self.log.debug('accept_share(%s)', share_id)
299 shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)299 shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)
300 d = self.wait_for_signal('ShareAnswerResponse',300 d = self.wait_for_signal('ShareAnswerResponse',
301 lambda info: info['share_id']==share_id)301 lambda info: info['volume_id']==share_id)
302 shares_client.call_method('accept_share', share_id,302 shares_client.call_method('accept_share', share_id,
303 reply_handler=lambda _: None,303 reply_handler=lambda _: None,
304 error_handler=d.errback)304 error_handler=d.errback)
@@ -309,7 +309,7 @@
309 self.log.debug('reject_share(%s)', share_id)309 self.log.debug('reject_share(%s)', share_id)
310 shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)310 shares_client = DBusClient(self.bus, '/shares', DBUS_IFACE_SHARES_NAME)
311 d = self.wait_for_signal('ShareAnswerResponse',311 d = self.wait_for_signal('ShareAnswerResponse',
312 lambda info: info['share_id']==share_id)312 lambda info: info['volume_id']==share_id)
313 shares_client.call_method('reject_share', share_id,313 shares_client.call_method('reject_share', share_id,
314 reply_handler=lambda _: None,314 reply_handler=lambda _: None,
315 error_handler=d.errback)315 error_handler=d.errback)
@@ -601,8 +601,8 @@
601 for share in shares:601 for share in shares:
602 msg_template = ' id=%s name=%s accepted=%s ' + \602 msg_template = ' id=%s name=%s accepted=%s ' + \
603 'access_level=%s to=%s path=%s\n'603 'access_level=%s to=%s path=%s\n'
604 out.write(msg_template % (share['id'], share['name'],604 out.write(msg_template % (share['volume_id'], share['name'],
605 share['accepted'], share['access_level'],605 bool(share['accepted']), share['access_level'],
606 share['other_username'],606 share['other_username'],
607 share['path']))607 share['path']))
608608
@@ -615,7 +615,7 @@
615 out.write("Folder list:\n")615 out.write("Folder list:\n")
616 for folder in folders:616 for folder in folders:
617 msg_template = ' id=%s subscribed=%s path=%s\n'617 msg_template = ' id=%s subscribed=%s path=%s\n'
618 out.write(msg_template % (folder['id'], folder['subscribed'],618 out.write(msg_template % (folder['volume_id'], folder['subscribed'],
619 folder['path']))619 folder['path']))
620620
621621
@@ -636,7 +636,7 @@
636 out.write("Shares list:\n")636 out.write("Shares list:\n")
637 for share in shares:637 for share in shares:
638 out.write(' id=%s name=%s accepted=%s access_level=%s from=%s\n' % \638 out.write(' id=%s name=%s accepted=%s access_level=%s from=%s\n' % \
639 (share['id'], share['name'], share['accepted'],639 (share['volume_id'], share['name'], bool(share['accepted']),
640 share['access_level'], share['other_username']))640 share['access_level'], share['other_username']))
641641
642642
643643
=== modified file 'ubuntuone/syncdaemon/volume_manager.py'
--- ubuntuone/syncdaemon/volume_manager.py 2010-01-26 19:40:05 +0000
+++ ubuntuone/syncdaemon/volume_manager.py 2010-01-26 20:35:29 +0000
@@ -41,10 +41,10 @@
41from twisted.internet import defer41from twisted.internet import defer
4242
4343
44class Share(object):44class _Share(object):
45 """Represents a share or mount point"""45 """Represents a share or mount point"""
4646
47 def __init__(self, volume_id=request.ROOT, node_id=None, path=None,47 def __init__(self, share_id=request.ROOT, node_id=None, path=None,
48 name=None, access_level='View', accepted=False,48 name=None, access_level='View', accepted=False,
49 other_username=None, other_visible_name=None):49 other_username=None, other_visible_name=None):
50 """ Creates the instance.50 """ Creates the instance.
@@ -55,7 +55,7 @@
55	  self.path = None
56	  else:
57	  self.path = os.path.normpath(path)
58	- self.id = str(volume_id)
58	+ self.id = str(share_id)
59	  self.access_level = access_level
60	  self.accepted = accepted
61	  self.name = name
@@ -64,16 +64,77 @@
64	  self.subtree = node_id
65	  self.free_bytes = None
66	
67
68class _UDF(object):
69 """A representation of a User Defined Folder."""
70
71 def __init__(self, udf_id, node_id, suggested_path,
72 path, subscribed=True):
73 """Create the UDF, subscribed by default"""
74 # id and node_id should be str or None
75 assert isinstance(udf_id, basestring) or udf_id is None
76 assert isinstance(node_id, basestring) or node_id is None
77 self.id = udf_id
78 self.node_id = node_id
79 self.suggested_path = suggested_path
80 self.path = path
81 self.subscribed = subscribed
82
83
84class Volume(object):
85 """A generic volume."""
86
87 def __init__(self, volume_id, node_id):
88 """Create the volume."""
89 # id and node_id should be str or None
90 assert isinstance(volume_id, basestring) or volume_id is None
91 assert isinstance(node_id, basestring) or node_id is None
92 self.volume_id = volume_id
93 self.node_id = node_id
94
95 @property
96 def id(self):
97 return self.volume_id
98
99 def can_write(self):
100 raise NotImplementedError('Subclass responsability')
101
102 def __eq__(self, other):
103 result = (self.id == other.id and
104 self.node_id == other.node_id)
105 return result
106
107
108class Share(Volume):
109 """A volume representing a Share."""
110
111 def __init__(self, volume_id=None, node_id=None, path=None, name=None,
112 other_username=None, other_visible_name=None, accepted=False,
113 access_level='View', free_bytes=None):
114 """Create the share."""
115 super(Share, self).__init__(volume_id, node_id)
116 self.__dict__['type'] = 'Share'
117 if path is None:
118 self.path = None
119 else:
120 self.path = os.path.normpath(path)
121 self.name = name
122 self.other_username = other_username
123 self.other_visible_name = other_visible_name
124 self.accepted = accepted
125 self.access_level = access_level
126 self.free_bytes = free_bytes
127
67 @classmethod128 @classmethod
68 def from_response(cls, share_response, path):129 def from_response(cls, share_response, path):
69 """ Creates a Share instance from a ShareResponse.130 """ Creates a Share instance from a ShareResponse.
70131
71 The received path should be 'bytes'132 The received path should be 'bytes'
72 """133 """
73 share = cls(str(share_response.id), share_response.subtree, path,134 share = cls(str(share_response.id), str(share_response.subtree),
74 share_response.name, share_response.access_level,135 path, share_response.name, share_response.other_username,
75 share_response.accepted, share_response.other_username,136 share_response.other_visible_name,
76 share_response.other_visible_name)137 share_response.accepted, share_response.access_level)
77 return share138 return share
78139
79 @classmethod140 @classmethod
@@ -82,12 +143,12 @@
82143
83 The received path should be 'bytes'144 The received path should be 'bytes'
84 """145 """
85 share = cls(path=path, volume_id=str(share_notify.share_id),146 share = cls(volume_id=str(share_notify.share_id),
86 name=share_notify.share_name,147 node_id=str(share_notify.subtree),
87 access_level=share_notify.access_level,148 path=path, name=share_notify.share_name,
88 other_username=share_notify.from_username,149 other_username=share_notify.from_username,
89 other_visible_name=share_notify.from_visible_name,150 other_visible_name=share_notify.from_visible_name,
90 node_id=share_notify.subtree)151 access_level=share_notify.access_level)
91 return share152 return share
92153
93 @classmethod154 @classmethod
@@ -97,13 +158,11 @@
97 The received path should be 'bytes'158 The received path should be 'bytes'
98159
99 """160 """
100 share = cls(volume_id=str(share_volume.volume_id), path=path,161 share = cls(str(share_volume.volume_id), str(share_volume.node_id),
101 name=share_volume.share_name,162 path, share_volume.share_name,
102 access_level=share_volume.access_level,163 share_volume.other_username,
103 other_username=share_volume.other_username,164 share_volume.other_visible_name, share_volume.accepted,
104 other_visible_name=share_volume.other_visible_name,165 share_volume.access_level)
105 node_id=str(share_volume.node_id),
106 accepted=share_volume.accepted)
107 return share166 return share
108167
109 def can_write(self):168 def can_write(self):
@@ -114,35 +173,64 @@
114173
115 @property174 @property
116 def active(self):175 def active(self):
117 """Returns True if the Share is accepted."""176 """Return True if this Share is accepted."""
118 return self.accepted177 return self.accepted
119178
120 # node_id property179 def __eq__(self, other):
121 def _set_node_id(self, node_id):180 result = (super(Share, self).__eq__(other) and
122 self.subtree = node_id181 self.path == other.path and
123 node_id = property(lambda self: self.subtree, _set_node_id)182 self.name == other.name and
124183 self.other_username == other.other_username and
125 # volume_id property184 self.other_visible_name == other.other_visible_name and
126 def _set_volume_id(self, volume_id):185 self.accepted == other.accepted and
127 self.id = volume_id186 self.access_level == other.access_level)
128 volume_id = property(lambda self: self.id, _set_volume_id)187 return result
129188
130189
131class UDF(object):190class Shared(Share):
132 """A representation of a User Defined Folder."""191
133192 def __init__(self, *args, **kwargs):
134 def __init__(self, volume_id, node_id, suggested_path,193 super(Shared, self).__init__(*args, **kwargs)
135 path, subscribed=True):194 self.__dict__['type'] = 'Shared'
195
196
197class Root(Volume):
198 """A volume representing the root."""
199
200 def __init__(self, volume_id=None, node_id=None, path=None):
201 """Create the Root."""
202 super(Root, self).__init__(volume_id, node_id)
203 self.__dict__['type'] = 'Root'
204 self.path = path
205
206 def __eq__(self, other):
207 result = (super(Root, self).__eq__(other) and
208 self.path == other.path)
209 return result
210
211 def can_write(self):
212 return True
213
214 def is_active(self):
215 return True
216
217
218class UDF(Volume):
219 """A volume representing a User Defined Folder."""
220
221 def __init__(self, volume_id=None, node_id=None,
222 suggested_path=None, path=None, subscribed=True):
136 """Create the UDF, subscribed by default"""223 """Create the UDF, subscribed by default"""
137 # id and node_id should be str or None224 super(UDF, self).__init__(volume_id, node_id)
138 assert isinstance(volume_id, basestring) or volume_id is None225 self.__dict__['type'] = 'UDF'
139 assert isinstance(node_id, basestring) or node_id is None
140 self.id = volume_id
141 self.node_id = node_id226 self.node_id = node_id
142 self.suggested_path = suggested_path227 self.suggested_path = suggested_path
143 self.path = path228 self.path = path
144 self.subscribed = subscribed229 self.subscribed = subscribed
145230
231 def __repr__(self):
232 return "<UDF id %r, real path %r>" % (self.id, self.path)
233
146 @property234 @property
147 def ancestors(self):235 def ancestors(self):
148 """Calculate all the ancestors for this UDF's path."""236 """Calculate all the ancestors for this UDF's path."""
@@ -161,6 +249,11 @@
161 """We always can write in a UDF."""249 """We always can write in a UDF."""
162 return True250 return True
163251
252 @property
253 def active(self):
254 """Returns True if the UDF is subscribed."""
255 return self.subscribed
256
164 @classmethod257 @classmethod
165 def from_udf_volume(cls, udf_volume, path):258 def from_udf_volume(cls, udf_volume, path):
166 """Creates a UDF instance from a volumes.UDFVolume.259 """Creates a UDF instance from a volumes.UDFVolume.
@@ -171,21 +264,18 @@
171 return cls(str(udf_volume.volume_id), str(udf_volume.node_id),264 return cls(str(udf_volume.volume_id), str(udf_volume.node_id),
172 udf_volume.suggested_path, path)265 udf_volume.suggested_path, path)
173266
174 @property267 def __eq__(self, other):
175 def active(self):268 result = (super(UDF, self).__eq__(other) and
176 """Returns True if the UDF is subscribed."""269 self.suggested_path == other.suggested_path and
177 return self.subscribed270 self.path == other.path and
178271 self.subscribed == other.subscribed)
179 # volume_id property272 return result
180 def _set_volume_id(self, volume_id):
181 self.id = volume_id
182 volume_id = property(lambda self: self.id, _set_volume_id)
183273
184274
185class VolumeManager(object):275class VolumeManager(object):
186 """Manages shares and mount points."""276 """Manages shares and mount points."""
187277
188	- METADATA_VERSION = '5'
278	+ METADATA_VERSION = '6'
189279
190 def __init__(self, main):280 def __init__(self, main):
191 """Create the instance and populate the shares/d attributes281 """Create the instance and populate the shares/d attributes
@@ -199,8 +289,9 @@
199 self._udfs_dir = os.path.join(self._data_dir, 'udfs')289 self._udfs_dir = os.path.join(self._data_dir, 'udfs')
200290
201 md_upgrader = MetadataUpgrader(self._data_dir, self._shares_dir,291 md_upgrader = MetadataUpgrader(self._data_dir, self._shares_dir,
202 self._shared_dir, self.m.root_dir,292 self._shared_dir, self._udfs_dir,
203 self.m.shares_dir, self.m.shares_dir_link)293 self.m.root_dir, self.m.shares_dir,
294 self.m.shares_dir_link)
204 md_upgrader.upgrade_metadata()295 md_upgrader.upgrade_metadata()
205296
206 # build the dir layout297 # build the dir layout
@@ -233,7 +324,7 @@
233 self.shared = VMFileShelf(self._shared_dir)324 self.shared = VMFileShelf(self._shared_dir)
234 self.udfs = VMFileShelf(self._udfs_dir)325 self.udfs = VMFileShelf(self._udfs_dir)
235 if self.shares.get(request.ROOT) is None:326 if self.shares.get(request.ROOT) is None:
236 self.root = Share(self.m.root_dir)327 self.root = Root(path=self.m.root_dir)
237 else:328 else:
238 self.root = self.shares[request.ROOT]329 self.root = self.shares[request.ROOT]
239 self.root.access_level = 'Modify'330 self.root.access_level = 'Modify'
@@ -356,7 +447,7 @@
356 self.log.warning("we got a share with 'from_me' direction,"447 self.log.warning("we got a share with 'from_me' direction,"
357 " but don't have the node_id in the metadata yet")448 " but don't have the node_id in the metadata yet")
358 path = None449 path = None
359 share = Share.from_response(a_share, path)450 share = Shared.from_response(a_share, path)
360 shared.append(share.volume_id)451 shared.append(share.volume_id)
361 self.add_shared(share)452 self.add_shared(share)
362 self._cleanup_volumes(shares, shared)453 self._cleanup_volumes(shares, shared)
@@ -583,7 +674,7 @@
583 mdobj = self.m.fs.get_by_path(path)674 mdobj = self.m.fs.get_by_path(path)
584 mdid = mdobj.mdid675 mdid = mdobj.mdid
585 marker = MDMarker(mdid)676 marker = MDMarker(mdid)
586 share = Share(path=self.m.fs.get_abspath("", mdobj.path),677 share = Shared(path=self.m.fs.get_abspath("", mdobj.path),
587 volume_id=marker,678 volume_id=marker,
588 name=name, access_level=access_level,679 name=name, access_level=access_level,
589 other_username=username, other_visible_name=None,680 other_username=username, other_visible_name=None,
@@ -748,17 +839,18 @@
748 udf = self.udfs[udf_id]839 udf = self.udfs[udf_id]
749 except KeyError:840 except KeyError:
750 push_error("DOES_NOT_EXIST")841 push_error("DOES_NOT_EXIST")
751 udf.subscribed = True
752 self.udfs[udf_id] = udf
753 try:
754 d = self._scan_udf(udf)
755 except KeyError, e:
756 push_error("METADATA_DOES_NOT_EXIST")
757 else:842 else:
758 d.addCallbacks(843 udf.subscribed = True
759 lambda _: self.m.event_q.push('VM_UDF_SUBSCRIBED', udf),844 self.udfs[udf_id] = udf
760 lambda f: push_error(f.getErrorMessage()))845 try:
761 return d846 d = self._scan_udf(udf)
847 except KeyError, e:
848 push_error("METADATA_DOES_NOT_EXIST")
849 else:
850 d.addCallbacks(
851 lambda _: self.m.event_q.push('VM_UDF_SUBSCRIBED', udf),
852 lambda f: push_error(f.getErrorMessage()))
853 return d
762854
763 def _scan_udf(self, udf):855 def _scan_udf(self, udf):
764 """Local and server rescan of a UDF."""856 """Local and server rescan of a UDF."""
@@ -844,7 +936,7 @@
844class MetadataUpgrader(object):936class MetadataUpgrader(object):
845 """A class that loads old metadata and migrate it."""937 """A class that loads old metadata and migrate it."""
846938
847 def __init__(self, data_dir, shares_md_dir, shared_md_dir,939 def __init__(self, data_dir, shares_md_dir, shared_md_dir, udfs_md_dir,
848 root_dir, shares_dir, shares_dir_link):940 root_dir, shares_dir, shares_dir_link):
849 """Creates the instance"""941 """Creates the instance"""
850 self.log = logging.getLogger('ubuntuone.SyncDaemon.VM.MD')942 self.log = logging.getLogger('ubuntuone.SyncDaemon.VM.MD')
@@ -852,6 +944,7 @@
852 self._shares_dir = shares_dir944 self._shares_dir = shares_dir
853 self._shares_md_dir = shares_md_dir945 self._shares_md_dir = shares_md_dir
854 self._shared_md_dir = shared_md_dir946 self._shared_md_dir = shared_md_dir
947 self._udfs_md_dir = udfs_md_dir
855 self._root_dir = root_dir948 self._root_dir = root_dir
856 self._shares_dir_link = shares_dir_link949 self._shares_dir_link = shares_dir_link
857 self._version_file = os.path.join(self._data_dir, '.version')950 self._version_file = os.path.join(self._data_dir, '.version')
@@ -878,8 +971,58 @@
878 if not md_version:971 if not md_version:
879 # we don't have a version of the metadata but a .version file?972 # we don't have a version of the metadata but a .version file?
880 # assume it's None and do an upgrade from version 0973 # assume it's None and do an upgrade from version 0
881 md_version = None974 md_version = self._guess_metadata_version()
882 else:975 else:
976 md_version = self._guess_metadata_version()
977 self.log.debug('metadata version: %s', md_version)
978 return md_version
979
980 def _guess_metadata_version(self):
981 """Try to guess the metadata version based on current metadata
982 and layout, fallbacks to md_version = None if can't guess it.
983
984 """
985 #md_version = None
986 if os.path.exists(self._shares_md_dir) \
987 and os.path.exists(self._shared_md_dir):
988 # we have shares and shared dirs
989 # md_version >= 1
990 old_root_dir = os.path.join(self._root_dir, 'My Files')
991 old_share_dir = os.path.join(self._root_dir, 'Shared With Me')
992 if os.path.exists(old_share_dir) and os.path.exists(old_root_dir) \
993 and not os.path.islink(old_share_dir):
994 # md >= 1 and <= 3
995 # we have a My Files dir, 'Shared With Me' isn't a
996 # symlink and ~/.local/share/ubuntuone/shares doesn't
997 # exists.
998 # md_version <= 3, set it to 2 as it will migrate
999 # .conflict to .u1conflict, and we don't need to upgrade
1000 # from version 1 any more as the LegacyShareFileShelf
1001 # takes care of that.
1002 md_version = '2'
1003 else:
1004 try:
1005 target = os.readlink(self._shares_dir_link)
1006 except OSError:
1007 target = None
1008 if os.path.islink(self._shares_dir_link) \
1009 and os.path.normpath(target) == self._shares_dir_link:
1010 # broken symlink, md_version = 4
1011 md_version = '4'
1012 else:
1013 # md_version >= 5
1014 shelf = LegacyShareFileShelf(self._shares_md_dir)
1015 # check a pickled value to check if it's in version
1016 # 5 or 6
1017 for key in shelf:
1018 share = shelf[key]
1019 if isinstance(share, _Share):
1020 md_version = '5'
1021 else:
1022 md_version = '6'
1023 break
1024 else:
1025 # this is metadata 'None'
883 md_version = None1026 md_version = None
884 return md_version1027 return md_version
8851028
@@ -902,9 +1045,6 @@
902 if dir != os.path.basename(backup):1045 if dir != os.path.basename(backup):
903 shutil.move(os.path.join(dirname, dir),1046 shutil.move(os.path.join(dirname, dir),
904 os.path.join(backup, dir))1047 os.path.join(backup, dir))
905 # add the old module FQN to sys.modules in order to load the metadata
906 sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager'] = \
907 sys.modules['ubuntuone.syncdaemon.volume_manager']
908 # regenerate the shelf using the new layout using the backup as src1048 # regenerate the shelf using the new layout using the backup as src
909 old_shelf = LegacyShareFileShelf(backup)1049 old_shelf = LegacyShareFileShelf(backup)
910 if not os.path.exists(self._shares_dir):1050 if not os.path.exists(self._shares_dir):
@@ -912,9 +1052,7 @@
912 new_shelf = LegacyShareFileShelf(self._shares_md_dir)1052 new_shelf = LegacyShareFileShelf(self._shares_md_dir)
913 for key in old_shelf.keys():1053 for key in old_shelf.keys():
914 new_shelf[key] = old_shelf[key]1054 new_shelf[key] = old_shelf[key]
915 # undo the change to sys.modules1055 # now upgrade to metadata 2
916 del sys.modules['canonical.ubuntuone.storage.syncdaemon.volume_manager']
917 # now upgrade to metadata 3
918 self._upgrade_metadata_2(md_version)1056 self._upgrade_metadata_2(md_version)
9191057
920 def _upgrade_metadata_1(self, md_version):1058 def _upgrade_metadata_1(self, md_version):
@@ -946,7 +1084,6 @@
946 for names in filenames, dirnames:1084 for names in filenames, dirnames:
947 self._upgrade_names(dirpath, names)1085 self._upgrade_names(dirpath, names)
948 self._upgrade_metadata_3(md_version)1086 self._upgrade_metadata_3(md_version)
949 self.update_metadata_version()
9501087
951 def _upgrade_names(self, dirpath, names):1088 def _upgrade_names(self, dirpath, names):
952 """1089 """
@@ -997,6 +1134,14 @@
997 old_root_dir = os.path.join(self._root_dir, 'My Files')1134 old_root_dir = os.path.join(self._root_dir, 'My Files')
998 # change permissions1135 # change permissions
999 os.chmod(self._root_dir, 0775)1136 os.chmod(self._root_dir, 0775)
1137
1138 def move(src, dst):
1139 """Move a file/dir taking care if it's read-only."""
1140 prev_mode = stat.S_IMODE(os.stat(src).st_mode)
1141 os.chmod(src, 0755)
1142 shutil.move(src, dst)
1143 os.chmod(dst, prev_mode)
1144
1000 # update the path's in metadata and move the folder1145 # update the path's in metadata and move the folder
1001 if os.path.exists(old_share_dir) and not os.path.islink(old_share_dir):1146 if os.path.exists(old_share_dir) and not os.path.islink(old_share_dir):
1002 os.chmod(old_share_dir, 0775)1147 os.chmod(old_share_dir, 0775)
@@ -1004,14 +1149,23 @@
1004 os.makedirs(os.path.dirname(self._shares_dir))1149 os.makedirs(os.path.dirname(self._shares_dir))
1005 self.log.debug('moving shares dir from: %r to %r',1150 self.log.debug('moving shares dir from: %r to %r',
1006 old_share_dir, self._shares_dir)1151 old_share_dir, self._shares_dir)
1007 shutil.move(old_share_dir, self._shares_dir)1152 for path in os.listdir(old_share_dir):
1153 src = os.path.join(old_share_dir, path)
1154 dst = os.path.join(self._shares_dir, path)
1155 move(src, dst)
1156 os.rmdir(old_share_dir)
1157
1008 # update the shares metadata1158 # update the shares metadata
1009 shares = LegacyShareFileShelf(self._shares_md_dir)1159 shares = LegacyShareFileShelf(self._shares_md_dir)
1010 for key in shares.keys():1160 for key in shares.keys():
1011 share = shares[key]1161 share = shares[key]
1012 if share.path is not None:1162 if share.path is not None:
1013 share.path = share.path.replace(old_share_dir,1163 if share.path == old_root_dir:
1014 self._shares_dir)1164 share.path = share.path.replace(old_root_dir,
1165 self._root_dir)
1166 else:
1167 share.path = share.path.replace(old_share_dir,
1168 self._shares_dir)
1015 shares[key] = share1169 shares[key] = share
10161170
1017 shared = LegacyShareFileShelf(self._shared_md_dir)1171 shared = LegacyShareFileShelf(self._shared_md_dir)
@@ -1021,7 +1175,7 @@
1021 share.path = share.path.replace(old_root_dir, self._root_dir)1175 share.path = share.path.replace(old_root_dir, self._root_dir)
1022 shared[key] = share1176 shared[key] = share
1023 # move the My Files contents, taking care of dir/files with the same1177 # move the My Files contents, taking care of dir/files with the same
1024 # in the new root1178 # name in the new root
1025 if os.path.exists(old_root_dir):1179 if os.path.exists(old_root_dir):
1026 self.log.debug('moving My Files contents to the root')1180 self.log.debug('moving My Files contents to the root')
1027 # make My Files rw1181 # make My Files rw
@@ -1038,10 +1192,11 @@
1038 os.remove(old_path)1192 os.remove(old_path)
1039 else:1193 else:
1040 self.log.debug('moving %r to %r', old_path, new_path)1194 self.log.debug('moving %r to %r', old_path, new_path)
1041 shutil.move(old_path, new_path)1195 move(old_path, new_path)
1042 self.log.debug('removing old root: %r', old_root_dir)1196 self.log.debug('removing old root: %r', old_root_dir)
1043 os.rmdir(old_root_dir)1197 os.rmdir(old_root_dir)
10441198
1199 # fix broken symlink (md_version 4)
1045 self._upgrade_metadata_4(md_version)1200 self._upgrade_metadata_4(md_version)
10461201
1047 def _upgrade_metadata_4(self, md_version):1202 def _upgrade_metadata_4(self, md_version):
@@ -1056,9 +1211,61 @@
1056 self.log.debug('removing broken shares symlink: %r -> %r',1211 self.log.debug('removing broken shares symlink: %r -> %r',
1057 self._shares_dir_link, target)1212 self._shares_dir_link, target)
1058 os.remove(self._shares_dir_link)1213 os.remove(self._shares_dir_link)
1214 self._upgrade_metadata_5(md_version)
10591215
1216 def _upgrade_metadata_5(self, md_version):
1217 """
1218 Upgrade to version 6 (plain dict storage)
1219 """
1220 self.log.debug('upgrading from metadata 5')
1221 # upgrade shares
1222 old_shares = LegacyShareFileShelf(self._shares_md_dir)
1223 shares = VMFileShelf(self._shares_md_dir)
1224 for key in old_shares.keys():
1225 share = old_shares[key]
1226 shares[key] = self._upgrade_share_to_volume(share)
1227 # upgrade shared folders
1228 old_shared = LegacyShareFileShelf(self._shared_md_dir)
1229 shared = VMFileShelf(self._shared_md_dir)
1230 for key in shared.keys():
1231 share = old_shared[key]
1232 shared[key] = self._upgrade_share_to_volume(share, shared=True)
1233 # upgrade the udfs
1234 old_udfs = LegacyShareFileShelf(self._udfs_md_dir)
1235 udfs = VMFileShelf(self._udfs_md_dir)
1236 for key in old_udfs.keys():
1237 udf = old_udfs[key]
1238 udfs[key] = UDF(udf.id, udf.node_id, udf.suggested_path,
1239 udf.path, udf.subscribed)
1060 self.update_metadata_version()1240 self.update_metadata_version()
10611241
1242 def _upgrade_share_to_volume(self, share, shared=False):
1243 """Upgrade from _Share to new Volume hierarchy."""
1244 def upgrade_share_dict(share):
1245 """Upgrade share __dict__ to be compatible with the
1246 new Share.__init__.
1247
1248 """
1249 if 'subtree' in share.__dict__:
1250 share.node_id = share.__dict__.pop('subtree')
1251 if 'id' in share.__dict__:
1252 share.volume_id = share.__dict__.pop('id')
1253 if 'free_bytes' in share.__dict__:
1254 free_bytes = share.__dict__.pop('free_bytes')
1255 else:
1256 free_bytes = None
1257 return share
1258 # handle the root special case
1259 if share.path == self._root_dir or share.id == '':
1260 r = Root(share.id, share.subtree, share.path)
1261 return r
1262 else:
1263 share = upgrade_share_dict(share)
1264 if shared:
1265 return Shared(**share.__dict__)
1266 else:
1267 return Share(**share.__dict__)
1268
1062 def update_metadata_version(self):1269 def update_metadata_version(self):
1063 """write the version of the metadata"""1270 """write the version of the metadata"""
1064 if not os.path.exists(os.path.dirname(self._version_file)):1271 if not os.path.exists(os.path.dirname(self._version_file)):
@@ -1082,8 +1289,13 @@
1082class VMFileShelf(file_shelf.FileShelf):1289class VMFileShelf(file_shelf.FileShelf):
1083 """ Custom file shelf that allow request.ROOT as key, it's replaced1290 """ Custom file shelf that allow request.ROOT as key, it's replaced
1084 by the string: root_node_id.1291 by the string: root_node_id.
1292
1085 """1293 """
10861294
1295 TYPE = 'type'
1296 classes = dict((sub.__name__, sub) for sub in \
1297 Volume.__subclasses__() + Share.__subclasses__())
1298
1087 def __init__(self, *args, **kwargs):1299 def __init__(self, *args, **kwargs):
1088 """ Create the instance. """1300 """ Create the instance. """
1089 super(VMFileShelf, self).__init__(*args, **kwargs)1301 super(VMFileShelf, self).__init__(*args, **kwargs)
@@ -1103,6 +1315,22 @@
1103 else:1315 else:
1104 yield key1316 yield key
11051317
1318 def _unpickle(self, fd):
1319 """Unpickle a dict and build the class instance specified in
1320 value['type'].
1321 """
1322 value = cPickle.load(fd)
1323 class_name = value[self.TYPE]
1324 clazz = self.classes[class_name]
1325 obj = clazz.__new__(clazz)
1326 obj.__dict__.update(value)
1327 return obj
1328
1329 def _pickle(self, value, fd, protocol):
1330 """Pickle value in fd using protocol."""
1331 cPickle.dump(value.__dict__, fd, protocol=protocol)
1332
1333
1106class LegacyShareFileShelf(VMFileShelf):1334class LegacyShareFileShelf(VMFileShelf):
1107 """A FileShelf capable of replacing pickled classes1335 """A FileShelf capable of replacing pickled classes
1108 with a different class.1336 with a different class.
@@ -1112,11 +1340,12 @@
1112 """1340 """
11131341
1114 upgrade_map = {1342 upgrade_map = {
1343 ('ubuntuone.syncdaemon.volume_manager', 'UDF'):_UDF,
1344 ('ubuntuone.syncdaemon.volume_manager', 'Share'):_Share,
1115 ('canonical.ubuntuone.storage.syncdaemon.volume_manager',1345 ('canonical.ubuntuone.storage.syncdaemon.volume_manager',
1116 'Share'):Share1346 'Share'):_Share
1117 }1347 }
11181348
1119
1120 def _find_global(self, module, name):1349 def _find_global(self, module, name):
1121 """Returns the class object for (module, name) or None."""1350 """Returns the class object for (module, name) or None."""
1122 # handle our 'migration types'1351 # handle our 'migration types'

Subscribers

People subscribed via source and target branches