Merge lp:~statik/ubuntuone-client/fix-lint into lp:ubuntuone-client
- fix-lint
- Merge into trunk
Proposed by
Elliot Murphy
Status: | Merged |
---|---|
Approved by: | Eric Casteleijn |
Approved revision: | not available |
Merged at revision: | not available |
Proposed branch: | lp:~statik/ubuntuone-client/fix-lint |
Merge into: | lp:ubuntuone-client |
Diff against target: |
417 lines (+36/-49) 17 files modified
tests/syncdaemon/test_action_predicates.py (+4/-4) tests/syncdaemon/test_action_queue.py (+3/-3) tests/syncdaemon/test_dbus.py (+0/-5) tests/syncdaemon/test_eq_inotify.py (+0/-3) tests/syncdaemon/test_fileshelf.py (+1/-1) tests/syncdaemon/test_fsm.py (+12/-12) tests/syncdaemon/test_hashqueue.py (+0/-1) tests/syncdaemon/test_localrescan.py (+3/-3) tests/syncdaemon/test_sync.py (+1/-1) tests/syncdaemon/test_vm.py (+1/-5) ubuntuone/oauthdesktop/auth.py (+1/-1) ubuntuone/syncdaemon/dbus_interface.py (+1/-1) ubuntuone/syncdaemon/events_nanny.py (+1/-1) ubuntuone/syncdaemon/filesystem_manager.py (+1/-2) ubuntuone/syncdaemon/volume_manager.py (+5/-4) ubuntuone/u1sync/client.py (+1/-1) ubuntuone/u1sync/main.py (+1/-1) |
To merge this branch: | bzr merge lp:~statik/ubuntuone-client/fix-lint |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Eric Casteleijn (community) | Approve | ||
Rick McBride (community) | Approve | ||
Review via email: mp+19036@code.launchpad.net |
Commit message
Fix all lint warnings on lucid
Description of the change
To post a comment you must log in.
Revision history for this message
Elliot Murphy (statik) wrote : | # |
Revision history for this message
Rick McBride (rmcbride) wrote : | # |
YAY no more lint warnings on Lucid
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/syncdaemon/test_action_predicates.py' | |||
2 | --- tests/syncdaemon/test_action_predicates.py 2010-01-13 20:07:41 +0000 | |||
3 | +++ tests/syncdaemon/test_action_predicates.py 2010-02-10 17:40:25 +0000 | |||
4 | @@ -86,10 +86,10 @@ | |||
5 | 86 | def test_simple_commands(self): | 86 | def test_simple_commands(self): |
6 | 87 | """Test command execution.""" | 87 | """Test command execution.""" |
7 | 88 | out = [] | 88 | out = [] |
12 | 89 | cmd = AppendCommand(self.request_queue, | 89 | AppendCommand(self.request_queue, |
13 | 90 | append_to=out, value="a").start() | 90 | append_to=out, value="a").start() |
14 | 91 | cmd = AppendCommand(self.request_queue, | 91 | AppendCommand(self.request_queue, |
15 | 92 | append_to=out, value="b").start() | 92 | append_to=out, value="b").start() |
16 | 93 | self.request_queue.run() | 93 | self.request_queue.run() |
17 | 94 | self.request_queue.run() | 94 | self.request_queue.run() |
18 | 95 | self.assertEqual(["a", "b"], out) | 95 | self.assertEqual(["a", "b"], out) |
19 | 96 | 96 | ||
20 | === modified file 'tests/syncdaemon/test_action_queue.py' | |||
21 | --- tests/syncdaemon/test_action_queue.py 2010-01-29 18:09:48 +0000 | |||
22 | +++ tests/syncdaemon/test_action_queue.py 2010-02-10 17:40:25 +0000 | |||
23 | @@ -360,7 +360,7 @@ | |||
24 | 360 | 360 | ||
25 | 361 | self.command.action_queue.client.create_udf = check | 361 | self.command.action_queue.client.create_udf = check |
26 | 362 | 362 | ||
28 | 363 | res = self.command._run() | 363 | self.command._run() |
29 | 364 | 364 | ||
30 | 365 | self.assertTrue(self.called, 'command was called') | 365 | self.assertTrue(self.called, 'command was called') |
31 | 366 | 366 | ||
32 | @@ -420,7 +420,7 @@ | |||
33 | 420 | 420 | ||
34 | 421 | self.command.action_queue.client.list_volumes = check | 421 | self.command.action_queue.client.list_volumes = check |
35 | 422 | 422 | ||
37 | 423 | res = self.command._run() | 423 | self.command._run() |
38 | 424 | 424 | ||
39 | 425 | self.assertTrue(self.called, 'command was called') | 425 | self.assertTrue(self.called, 'command was called') |
40 | 426 | 426 | ||
41 | @@ -481,7 +481,7 @@ | |||
42 | 481 | 481 | ||
43 | 482 | self.command.action_queue.client.delete_volume = check | 482 | self.command.action_queue.client.delete_volume = check |
44 | 483 | 483 | ||
46 | 484 | res = self.command._run() | 484 | self.command._run() |
47 | 485 | 485 | ||
48 | 486 | self.assertTrue(self.called, 'command was called') | 486 | self.assertTrue(self.called, 'command was called') |
49 | 487 | 487 | ||
50 | 488 | 488 | ||
51 | === modified file 'tests/syncdaemon/test_dbus.py' | |||
52 | --- tests/syncdaemon/test_dbus.py 2010-02-04 18:04:41 +0000 | |||
53 | +++ tests/syncdaemon/test_dbus.py 2010-02-10 17:40:25 +0000 | |||
54 | @@ -1657,8 +1657,6 @@ | |||
55 | 1657 | def test_create_server_error(self): | 1657 | def test_create_server_error(self): |
56 | 1658 | """Test for Folders.create.""" | 1658 | """Test for Folders.create.""" |
57 | 1659 | path = os.path.join(self.home_dir, u'ñoño') | 1659 | path = os.path.join(self.home_dir, u'ñoño') |
58 | 1660 | id = uuid.uuid4() | ||
59 | 1661 | node_id = uuid.uuid4() | ||
60 | 1662 | d = defer.Deferred() | 1660 | d = defer.Deferred() |
61 | 1663 | # patch AQ.create_udf | 1661 | # patch AQ.create_udf |
62 | 1664 | def create_udf(path, name, marker): | 1662 | def create_udf(path, name, marker): |
63 | @@ -1685,8 +1683,6 @@ | |||
64 | 1685 | def test_create_client_error(self): | 1683 | def test_create_client_error(self): |
65 | 1686 | """Test for Folders.create.""" | 1684 | """Test for Folders.create.""" |
66 | 1687 | path = os.path.join(self.home_dir, u'ñoño') | 1685 | path = os.path.join(self.home_dir, u'ñoño') |
67 | 1688 | id = uuid.uuid4() | ||
68 | 1689 | node_id = uuid.uuid4() | ||
69 | 1690 | d = defer.Deferred() | 1686 | d = defer.Deferred() |
70 | 1691 | # patch AQ.create_udf | 1687 | # patch AQ.create_udf |
71 | 1692 | def create_udf(path, name, marker): | 1688 | def create_udf(path, name, marker): |
72 | @@ -1789,7 +1785,6 @@ | |||
73 | 1789 | udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path, | 1785 | udf = self._create_udf(uuid.uuid4(), 'node_id', suggested_path, |
74 | 1790 | subscribed=False) | 1786 | subscribed=False) |
75 | 1791 | yield self.main.vm.add_udf(udf) | 1787 | yield self.main.vm.add_udf(udf) |
76 | 1792 | signal_deferred = defer.Deferred() | ||
77 | 1793 | d = defer.Deferred() | 1788 | d = defer.Deferred() |
78 | 1794 | def subscribe_handler(info): | 1789 | def subscribe_handler(info): |
79 | 1795 | """FolderSubscribed handler.""" | 1790 | """FolderSubscribed handler.""" |
80 | 1796 | 1791 | ||
81 | === modified file 'tests/syncdaemon/test_eq_inotify.py' | |||
82 | --- tests/syncdaemon/test_eq_inotify.py 2010-02-08 19:24:06 +0000 | |||
83 | +++ tests/syncdaemon/test_eq_inotify.py 2010-02-10 17:40:25 +0000 | |||
84 | @@ -1165,7 +1165,6 @@ | |||
85 | 1165 | fromfile = os.path.join(self.root_dir, "mdid.u1partial.foo") | 1165 | fromfile = os.path.join(self.root_dir, "mdid.u1partial.foo") |
86 | 1166 | root_dir = os.path.join(self.root_dir, "my_files") | 1166 | root_dir = os.path.join(self.root_dir, "my_files") |
87 | 1167 | tofile = os.path.join(root_dir, "foo") | 1167 | tofile = os.path.join(root_dir, "foo") |
88 | 1168 | mypath = functools.partial(os.path.join, root_dir) | ||
89 | 1169 | os.mkdir(root_dir) | 1168 | os.mkdir(root_dir) |
90 | 1170 | open(fromfile, "w").close() | 1169 | open(fromfile, "w").close() |
91 | 1171 | self.eq.add_to_mute_filter("FS_FILE_CREATE", tofile) | 1170 | self.eq.add_to_mute_filter("FS_FILE_CREATE", tofile) |
92 | @@ -1366,7 +1365,6 @@ | |||
93 | 1366 | def test_move_udf_ancestor(self): | 1365 | def test_move_udf_ancestor(self): |
94 | 1367 | """UDF is unsubscribed on ancestor move.""" | 1366 | """UDF is unsubscribed on ancestor move.""" |
95 | 1368 | original = self.eq.fs.vm.unsubscribe_udf | 1367 | original = self.eq.fs.vm.unsubscribe_udf |
96 | 1369 | expected = [] | ||
97 | 1370 | path = self.udf.ancestors[-2] # an ancestor common to both UDFs | 1368 | path = self.udf.ancestors[-2] # an ancestor common to both UDFs |
98 | 1371 | # generate IN_MOVED_FROM and IN_MOVED_TO | 1369 | # generate IN_MOVED_FROM and IN_MOVED_TO |
99 | 1372 | newpath = path + u'.old' | 1370 | newpath = path + u'.old' |
100 | @@ -1405,7 +1403,6 @@ | |||
101 | 1405 | def test_move_udf_itself(self): | 1403 | def test_move_udf_itself(self): |
102 | 1406 | """UDF is unsubscribed if renamed.""" | 1404 | """UDF is unsubscribed if renamed.""" |
103 | 1407 | original = self.eq.fs.vm.unsubscribe_udf | 1405 | original = self.eq.fs.vm.unsubscribe_udf |
104 | 1408 | expected = [] | ||
105 | 1409 | newpath = self.udf.path + u'.old' | 1406 | newpath = self.udf.path + u'.old' |
106 | 1410 | os.rename(self.udf.path, newpath) | 1407 | os.rename(self.udf.path, newpath) |
107 | 1411 | assert os.path.exists(newpath) | 1408 | assert os.path.exists(newpath) |
108 | 1412 | 1409 | ||
109 | === modified file 'tests/syncdaemon/test_fileshelf.py' | |||
110 | --- tests/syncdaemon/test_fileshelf.py 2010-01-15 20:04:32 +0000 | |||
111 | +++ tests/syncdaemon/test_fileshelf.py 2010-02-10 17:40:25 +0000 | |||
112 | @@ -226,7 +226,7 @@ | |||
113 | 226 | 226 | ||
114 | 227 | def test_custom_unpickle(self): | 227 | def test_custom_unpickle(self): |
115 | 228 | """Test the _pickle and _unpikle methods.""" | 228 | """Test the _pickle and _unpikle methods.""" |
117 | 229 | path = self.mktemp('my_shelf') | 229 | self.mktemp('my_shelf') |
118 | 230 | class InMemoryFileShelf(FileShelf): | 230 | class InMemoryFileShelf(FileShelf): |
119 | 231 | """A in-memory FileShelf.""" | 231 | """A in-memory FileShelf.""" |
120 | 232 | values = {} | 232 | values = {} |
121 | 233 | 233 | ||
122 | === modified file 'tests/syncdaemon/test_fsm.py' | |||
123 | --- tests/syncdaemon/test_fsm.py 2010-01-26 19:46:37 +0000 | |||
124 | +++ tests/syncdaemon/test_fsm.py 2010-02-10 17:40:25 +0000 | |||
125 | @@ -1853,7 +1853,7 @@ | |||
126 | 1853 | """Test that a dir is deleted, when is not empty and modified.""" | 1853 | """Test that a dir is deleted, when is not empty and modified.""" |
127 | 1854 | local_dir = os.path.join(self.root_dir, "foo") | 1854 | local_dir = os.path.join(self.root_dir, "foo") |
128 | 1855 | os.mkdir(local_dir) | 1855 | os.mkdir(local_dir) |
130 | 1856 | mdid = self.fsm.create(local_dir, "", is_dir=True) | 1856 | self.fsm.create(local_dir, "", is_dir=True) |
131 | 1857 | self.fsm.set_node_id(local_dir, "uuid") | 1857 | self.fsm.set_node_id(local_dir, "uuid") |
132 | 1858 | 1858 | ||
133 | 1859 | local_file = os.path.join(local_dir, "bar.txt") | 1859 | local_file = os.path.join(local_dir, "bar.txt") |
134 | @@ -1875,12 +1875,12 @@ | |||
135 | 1875 | 1875 | ||
136 | 1876 | local_dir = os.path.join(self.root_dir, "foo") | 1876 | local_dir = os.path.join(self.root_dir, "foo") |
137 | 1877 | os.mkdir(local_dir) | 1877 | os.mkdir(local_dir) |
139 | 1878 | mdid = self.fsm.create(local_dir, "", is_dir=True) | 1878 | self.fsm.create(local_dir, "", is_dir=True) |
140 | 1879 | self.fsm.set_node_id(local_dir, "uuid") | 1879 | self.fsm.set_node_id(local_dir, "uuid") |
141 | 1880 | 1880 | ||
142 | 1881 | local_file = os.path.join(local_dir, "bar.txt") | 1881 | local_file = os.path.join(local_dir, "bar.txt") |
143 | 1882 | open(local_file, 'w').close() # touch bar.txt so it exists | 1882 | open(local_file, 'w').close() # touch bar.txt so it exists |
145 | 1883 | mdid_file = self.fsm.create(local_file, "") | 1883 | self.fsm.create(local_file, "") |
146 | 1884 | self.fsm.set_node_id(local_file, "uuid_file") | 1884 | self.fsm.set_node_id(local_file, "uuid_file") |
147 | 1885 | 1885 | ||
148 | 1886 | self.fsm.delete_file(local_dir) | 1886 | self.fsm.delete_file(local_dir) |
149 | @@ -1898,7 +1898,7 @@ | |||
150 | 1898 | log.read() # ignore log content till now | 1898 | log.read() # ignore log content till now |
151 | 1899 | 1899 | ||
152 | 1900 | local_dir = os.path.join(self.root_dir, "foo") | 1900 | local_dir = os.path.join(self.root_dir, "foo") |
154 | 1901 | mdid = self.fsm.create(local_dir, "", is_dir=True) | 1901 | self.fsm.create(local_dir, "", is_dir=True) |
155 | 1902 | self.fsm.set_node_id(local_dir, "uuid") | 1902 | self.fsm.set_node_id(local_dir, "uuid") |
156 | 1903 | 1903 | ||
157 | 1904 | # local_dir does not exist on the file system | 1904 | # local_dir does not exist on the file system |
158 | @@ -2006,7 +2006,7 @@ | |||
159 | 2006 | """Test the recursive changed feature for a non empty dir.""" | 2006 | """Test the recursive changed feature for a non empty dir.""" |
160 | 2007 | local_dir = os.path.join(self.root_dir, "foo") | 2007 | local_dir = os.path.join(self.root_dir, "foo") |
161 | 2008 | os.mkdir(local_dir) | 2008 | os.mkdir(local_dir) |
163 | 2009 | mdid = self.fsm.create(local_dir, "", is_dir=True) | 2009 | self.fsm.create(local_dir, "", is_dir=True) |
164 | 2010 | self.fsm.set_node_id(local_dir, "uuid") | 2010 | self.fsm.set_node_id(local_dir, "uuid") |
165 | 2011 | 2011 | ||
166 | 2012 | sub_dir = os.path.join(local_dir, "bar") | 2012 | sub_dir = os.path.join(local_dir, "bar") |
167 | @@ -2707,11 +2707,11 @@ | |||
168 | 2707 | if os.path.exists(d): | 2707 | if os.path.exists(d): |
169 | 2708 | shutil.rmtree(d) | 2708 | shutil.rmtree(d) |
170 | 2709 | os.mkdir(d) | 2709 | os.mkdir(d) |
172 | 2710 | mdid = self.fsm.create(d, '', is_dir=True) | 2710 | self.fsm.create(d, '', is_dir=True) |
173 | 2711 | self.fsm.set_node_id(d, 'uuid') | 2711 | self.fsm.set_node_id(d, 'uuid') |
174 | 2712 | 2712 | ||
175 | 2713 | open(self.some_file, 'w').close() | 2713 | open(self.some_file, 'w').close() |
177 | 2714 | mdid_file = self.fsm.create(self.some_file, "") | 2714 | self.fsm.create(self.some_file, "") |
178 | 2715 | self.fsm.set_node_id(self.some_file, "uuid_file") | 2715 | self.fsm.set_node_id(self.some_file, "uuid_file") |
179 | 2716 | 2716 | ||
180 | 2717 | def tearDown(self): | 2717 | def tearDown(self): |
181 | @@ -2737,7 +2737,7 @@ | |||
182 | 2737 | """Check paths starting with excluding directories with same prefix.""" | 2737 | """Check paths starting with excluding directories with same prefix.""" |
183 | 2738 | similar_dir = os.path.join(self.root_dir, 'fooo') | 2738 | similar_dir = os.path.join(self.root_dir, 'fooo') |
184 | 2739 | os.mkdir(similar_dir) | 2739 | os.mkdir(similar_dir) |
186 | 2740 | mdid = self.fsm.create(similar_dir, '', is_dir=True) | 2740 | self.fsm.create(similar_dir, '', is_dir=True) |
187 | 2741 | self.fsm.set_node_id(similar_dir, 'uuid') | 2741 | self.fsm.set_node_id(similar_dir, 'uuid') |
188 | 2742 | 2742 | ||
189 | 2743 | expected = sorted([(self.some_dir, True), (self.sub_dir, True), | 2743 | expected = sorted([(self.some_dir, True), (self.sub_dir, True), |
190 | @@ -2759,15 +2759,15 @@ | |||
191 | 2759 | def test_get_for_server_rescan_by_path(self): | 2759 | def test_get_for_server_rescan_by_path(self): |
192 | 2760 | """Test FSM.get_for_server_rescan_by_path method""" | 2760 | """Test FSM.get_for_server_rescan_by_path method""" |
193 | 2761 | # create the share fsm object | 2761 | # create the share fsm object |
195 | 2762 | share_mdid = self.fsm.create(self.share_path, self.share.volume_id) | 2762 | self.fsm.create(self.share_path, self.share.volume_id) |
196 | 2763 | self.fsm.set_node_id(self.share_path, "share_uuid") | 2763 | self.fsm.set_node_id(self.share_path, "share_uuid") |
197 | 2764 | # create a few nodes | 2764 | # create a few nodes |
198 | 2765 | path1 = os.path.join(self.share_path, "path1") | 2765 | path1 = os.path.join(self.share_path, "path1") |
199 | 2766 | path2 = os.path.join(self.share_path, "path1", "path2") | 2766 | path2 = os.path.join(self.share_path, "path1", "path2") |
200 | 2767 | path_out = os.path.join(self.root_dir, "path1") | 2767 | path_out = os.path.join(self.root_dir, "path1") |
204 | 2768 | mdid1 = self.fsm.create(path1, "share", is_dir=True) | 2768 | self.fsm.create(path1, "share", is_dir=True) |
205 | 2769 | mdid2 = self.fsm.create(path2, "share") | 2769 | self.fsm.create(path2, "share") |
206 | 2770 | mdid_out = self.fsm.create(path_out, "") | 2770 | self.fsm.create(path_out, "") |
207 | 2771 | self.fsm.set_node_id(path1, "uuid1") | 2771 | self.fsm.set_node_id(path1, "uuid1") |
208 | 2772 | self.fsm.set_node_id(path2, "uuid2") | 2772 | self.fsm.set_node_id(path2, "uuid2") |
209 | 2773 | self.fsm.set_node_id(path_out, "uuid3") | 2773 | self.fsm.set_node_id(path_out, "uuid3") |
210 | 2774 | 2774 | ||
211 | === modified file 'tests/syncdaemon/test_hashqueue.py' | |||
212 | --- tests/syncdaemon/test_hashqueue.py 2010-02-05 02:01:13 +0000 | |||
213 | +++ tests/syncdaemon/test_hashqueue.py 2010-02-10 17:40:25 +0000 | |||
214 | @@ -540,7 +540,6 @@ | |||
215 | 540 | hasher = content_hash_factory() | 540 | hasher = content_hash_factory() |
216 | 541 | hasher.hash_object.update(testinfo) | 541 | hasher.hash_object.update(testinfo) |
217 | 542 | testfile = os.path.join(self.test_dir, "testfile") | 542 | testfile = os.path.join(self.test_dir, "testfile") |
218 | 543 | testhash = hasher.content_hash() | ||
219 | 544 | # send what to hash | 543 | # send what to hash |
220 | 545 | with open(testfile, "w") as fh: | 544 | with open(testfile, "w") as fh: |
221 | 546 | fh.write(testinfo) | 545 | fh.write(testinfo) |
222 | 547 | 546 | ||
223 | === modified file 'tests/syncdaemon/test_localrescan.py' | |||
224 | --- tests/syncdaemon/test_localrescan.py 2010-02-09 14:21:08 +0000 | |||
225 | +++ tests/syncdaemon/test_localrescan.py 2010-02-10 17:40:25 +0000 | |||
226 | @@ -1679,8 +1679,8 @@ | |||
227 | 1679 | """The file is created but never started to download.""" | 1679 | """The file is created but never started to download.""" |
228 | 1680 | # create the file in metadata | 1680 | # create the file in metadata |
229 | 1681 | path = os.path.join(self.share.path, "a") | 1681 | path = os.path.join(self.share.path, "a") |
232 | 1682 | # open(path, "w").close() | 1682 | # open(path, "w").close() |
233 | 1683 | mdid = self.fsm.create(path, self.share.volume_id, is_dir=False, node_id="1") | 1683 | self.fsm.create(path, self.share.volume_id, is_dir=False, node_id="1") |
234 | 1684 | 1684 | ||
235 | 1685 | def check(_): | 1685 | def check(_): |
236 | 1686 | """No event, and no MD""" | 1686 | """No event, and no MD""" |
237 | @@ -1712,7 +1712,7 @@ | |||
238 | 1712 | os.mkdir(dir) | 1712 | os.mkdir(dir) |
239 | 1713 | path_b = os.path.join(self.share.path, "dir", "b") | 1713 | path_b = os.path.join(self.share.path, "dir", "b") |
240 | 1714 | open(path_b, "w").close() | 1714 | open(path_b, "w").close() |
242 | 1715 | mdid_dir = self.fsm.create(dir, self.share.volume_id, is_dir=True) | 1715 | self.fsm.create(dir, self.share.volume_id, is_dir=True) |
243 | 1716 | self.fsm.set_node_id(dir, "uuid2") | 1716 | self.fsm.set_node_id(dir, "uuid2") |
244 | 1717 | 1717 | ||
245 | 1718 | mdid_b = self.fsm.create(path_b, self.share.volume_id, is_dir=False) | 1718 | mdid_b = self.fsm.create(path_b, self.share.volume_id, is_dir=False) |
246 | 1719 | 1719 | ||
247 | === modified file 'tests/syncdaemon/test_sync.py' | |||
248 | --- tests/syncdaemon/test_sync.py 2010-01-22 20:31:51 +0000 | |||
249 | +++ tests/syncdaemon/test_sync.py 2010-02-10 17:40:25 +0000 | |||
250 | @@ -104,7 +104,7 @@ | |||
251 | 104 | def test_set(self): | 104 | def test_set(self): |
252 | 105 | """test that changes to the key are keeped in _changes until sync""" | 105 | """test that changes to the key are keeped in _changes until sync""" |
253 | 106 | path = os.path.join(self.share.path, 'path') | 106 | path = os.path.join(self.share.path, 'path') |
255 | 107 | mdid = self.fsm.create(path, "share", node_id='uuid1') | 107 | self.fsm.create(path, "share", node_id='uuid1') |
256 | 108 | key = FSKey(self.fsm, path=path) | 108 | key = FSKey(self.fsm, path=path) |
257 | 109 | key.set(local_hash='a_hash') | 109 | key.set(local_hash='a_hash') |
258 | 110 | self.assertEquals('a_hash', key._changes['local_hash']) | 110 | self.assertEquals('a_hash', key._changes['local_hash']) |
259 | 111 | 111 | ||
260 | === modified file 'tests/syncdaemon/test_vm.py' | |||
261 | --- tests/syncdaemon/test_vm.py 2010-02-01 13:43:33 +0000 | |||
262 | +++ tests/syncdaemon/test_vm.py 2010-02-10 17:40:25 +0000 | |||
263 | @@ -474,7 +474,7 @@ | |||
264 | 474 | """Test for VolumeManager._delete_fsm_object""" | 474 | """Test for VolumeManager._delete_fsm_object""" |
265 | 475 | path = os.path.join(self.root_dir, 'dir') | 475 | path = os.path.join(self.root_dir, 'dir') |
266 | 476 | os.makedirs(path) | 476 | os.makedirs(path) |
268 | 477 | mdid = self.main.fs.create(path, "", is_dir=True) | 477 | self.main.fs.create(path, "", is_dir=True) |
269 | 478 | self.main.fs.set_node_id(path, 'dir_node_id') | 478 | self.main.fs.set_node_id(path, 'dir_node_id') |
270 | 479 | self.main.event_q.inotify_add_watch(path) | 479 | self.main.event_q.inotify_add_watch(path) |
271 | 480 | self.assertTrue(self.main.event_q.inotify_has_watch(path), path) | 480 | self.assertTrue(self.main.event_q.inotify_has_watch(path), path) |
272 | @@ -858,7 +858,6 @@ | |||
273 | 858 | 858 | ||
274 | 859 | def test_handle_AQ_LIST_VOLUMES_root(self): | 859 | def test_handle_AQ_LIST_VOLUMES_root(self): |
275 | 860 | """Test the handling of the AQ_LIST_VOLUMES event.""" | 860 | """Test the handling of the AQ_LIST_VOLUMES event.""" |
276 | 861 | share_id = uuid.uuid4() | ||
277 | 862 | root_volume = volumes.RootVolume(uuid.uuid4()) | 861 | root_volume = volumes.RootVolume(uuid.uuid4()) |
278 | 863 | response = [root_volume] | 862 | response = [root_volume] |
279 | 864 | self.vm.refresh_volumes = lambda: self.fail('refresh_volumes called!') | 863 | self.vm.refresh_volumes = lambda: self.fail('refresh_volumes called!') |
280 | @@ -1209,8 +1208,6 @@ | |||
281 | 1209 | """Test for handle_AQ_CREATE_UDF_ERROR.""" | 1208 | """Test for handle_AQ_CREATE_UDF_ERROR.""" |
282 | 1210 | d = defer.Deferred() | 1209 | d = defer.Deferred() |
283 | 1211 | path = os.path.join(self.home_dir, u'ñoño'.encode("utf8")) | 1210 | path = os.path.join(self.home_dir, u'ñoño'.encode("utf8")) |
284 | 1212 | udf_id = uuid.uuid4() | ||
285 | 1213 | node_id = uuid.uuid4() | ||
286 | 1214 | # patch AQ.create_udf | 1211 | # patch AQ.create_udf |
287 | 1215 | def create_udf(path, name, marker): | 1212 | def create_udf(path, name, marker): |
288 | 1216 | """Fake create_udf""" | 1213 | """Fake create_udf""" |
289 | @@ -1393,7 +1390,6 @@ | |||
290 | 1393 | node_id=str(uuid.uuid4()), | 1390 | node_id=str(uuid.uuid4()), |
291 | 1394 | volume_id='accepted_share_id', | 1391 | volume_id='accepted_share_id', |
292 | 1395 | access_level='Modify', accepted=False) | 1392 | access_level='Modify', accepted=False) |
293 | 1396 | share_path_view = os.path.join(self.shares_dir, 'fake_share_view') | ||
294 | 1397 | share_view = Share(path=share_path, volume_id='share_id_view', | 1393 | share_view = Share(path=share_path, volume_id='share_id_view', |
295 | 1398 | access_level='View', accepted=True) | 1394 | access_level='View', accepted=True) |
296 | 1399 | self.vm.add_share(share_modify) | 1395 | self.vm.add_share(share_modify) |
297 | 1400 | 1396 | ||
298 | === modified file 'ubuntuone/oauthdesktop/auth.py' | |||
299 | --- ubuntuone/oauthdesktop/auth.py 2009-12-23 19:44:00 +0000 | |||
300 | +++ ubuntuone/oauthdesktop/auth.py 2010-02-10 17:40:25 +0000 | |||
301 | @@ -90,7 +90,7 @@ | |||
302 | 90 | newurl = headers['uri'] | 90 | newurl = headers['uri'] |
303 | 91 | else: | 91 | else: |
304 | 92 | return | 92 | return |
306 | 93 | void = fp.read() | 93 | fp.read() |
307 | 94 | fp.close() | 94 | fp.close() |
308 | 95 | # In case the server sent a relative URL, join with original: | 95 | # In case the server sent a relative URL, join with original: |
309 | 96 | newurl = urllib.basejoin(self.type + ":" + url, newurl) | 96 | newurl = urllib.basejoin(self.type + ":" + url, newurl) |
310 | 97 | 97 | ||
311 | === modified file 'ubuntuone/syncdaemon/dbus_interface.py' | |||
312 | --- ubuntuone/syncdaemon/dbus_interface.py 2010-02-01 17:48:25 +0000 | |||
313 | +++ ubuntuone/syncdaemon/dbus_interface.py 2010-02-10 17:40:25 +0000 | |||
314 | @@ -1434,7 +1434,7 @@ | |||
315 | 1434 | try: | 1434 | try: |
316 | 1435 | access_token = self.main.get_access_token() | 1435 | access_token = self.main.get_access_token() |
317 | 1436 | self.event_queue.push('SYS_CONNECT', access_token) | 1436 | self.event_queue.push('SYS_CONNECT', access_token) |
319 | 1437 | except NoAccessToken, e: | 1437 | except NoAccessToken: |
320 | 1438 | if do_login: | 1438 | if do_login: |
321 | 1439 | yield self._request_token() | 1439 | yield self._request_token() |
322 | 1440 | self.connect(do_login=False) | 1440 | self.connect(do_login=False) |
323 | 1441 | 1441 | ||
324 | === modified file 'ubuntuone/syncdaemon/events_nanny.py' | |||
325 | --- ubuntuone/syncdaemon/events_nanny.py 2009-11-20 22:00:25 +0000 | |||
326 | +++ ubuntuone/syncdaemon/events_nanny.py 2010-02-10 17:40:25 +0000 | |||
327 | @@ -96,7 +96,7 @@ | |||
328 | 96 | self._hashing.add(path) | 96 | self._hashing.add(path) |
329 | 97 | 97 | ||
330 | 98 | try: | 98 | try: |
332 | 99 | opened = self._reduce_opened(path) | 99 | self._reduce_opened(path) |
333 | 100 | except KeyError: | 100 | except KeyError: |
334 | 101 | # it wasn't supervised by open | 101 | # it wasn't supervised by open |
335 | 102 | return | 102 | return |
336 | 103 | 103 | ||
337 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' | |||
338 | --- ubuntuone/syncdaemon/filesystem_manager.py 2010-01-14 14:18:32 +0000 | |||
339 | +++ ubuntuone/syncdaemon/filesystem_manager.py 2010-02-10 17:40:25 +0000 | |||
340 | @@ -277,7 +277,7 @@ | |||
341 | 277 | # check if the share exists | 277 | # check if the share exists |
342 | 278 | try: | 278 | try: |
343 | 279 | self._get_share(mdobj["share_id"]) | 279 | self._get_share(mdobj["share_id"]) |
345 | 280 | except KeyError, e: | 280 | except KeyError: |
346 | 281 | # oops, the share is gone!, invalidate this mdid | 281 | # oops, the share is gone!, invalidate this mdid |
347 | 282 | log_warning('Share %s disappeared! deleting mdid: %s', mdobj['share_id'], mdid) | 282 | log_warning('Share %s disappeared! deleting mdid: %s', mdobj['share_id'], mdid) |
348 | 283 | del self.fs[mdid] | 283 | del self.fs[mdid] |
349 | @@ -784,7 +784,6 @@ | |||
350 | 784 | 784 | ||
351 | 785 | def _get_partial_path(self, mdobj): | 785 | def _get_partial_path(self, mdobj): |
352 | 786 | """Gets the path of the .partial file for a given mdobj""" | 786 | """Gets the path of the .partial file for a given mdobj""" |
353 | 787 | is_dir = mdobj["is_dir"] | ||
354 | 788 | path = self.get_abspath(mdobj['share_id'], mdobj['path']) | 787 | path = self.get_abspath(mdobj['share_id'], mdobj['path']) |
355 | 789 | partial_path = os.path.join(self.partials_dir, mdobj['mdid'] + '.u1partial') | 788 | partial_path = os.path.join(self.partials_dir, mdobj['mdid'] + '.u1partial') |
356 | 790 | dirname, filename = os.path.split(path) | 789 | dirname, filename = os.path.split(path) |
357 | 791 | 790 | ||
358 | === modified file 'ubuntuone/syncdaemon/volume_manager.py' | |||
359 | --- ubuntuone/syncdaemon/volume_manager.py 2010-02-09 15:22:50 +0000 | |||
360 | +++ ubuntuone/syncdaemon/volume_manager.py 2010-02-10 17:40:25 +0000 | |||
361 | @@ -905,7 +905,7 @@ | |||
362 | 905 | return result | 905 | return result |
363 | 906 | try: | 906 | try: |
364 | 907 | d = self._scan_udf(udf) | 907 | d = self._scan_udf(udf) |
366 | 908 | except KeyError, e: | 908 | except KeyError: |
367 | 909 | push_error("METADATA_DOES_NOT_EXIST") | 909 | push_error("METADATA_DOES_NOT_EXIST") |
368 | 910 | else: | 910 | else: |
369 | 911 | d.addCallback(subscribe) | 911 | d.addCallback(subscribe) |
370 | @@ -961,7 +961,7 @@ | |||
371 | 961 | def handle_AQ_DELETE_VOLUME_ERROR(self, volume_id, error): | 961 | def handle_AQ_DELETE_VOLUME_ERROR(self, volume_id, error): |
372 | 962 | """Handle AQ_DELETE_VOLUME_ERROR.""" | 962 | """Handle AQ_DELETE_VOLUME_ERROR.""" |
373 | 963 | try: | 963 | try: |
375 | 964 | volume = self.get_volume(str(volume_id)) | 964 | self.get_volume(str(volume_id)) |
376 | 965 | except KeyError: | 965 | except KeyError: |
377 | 966 | self.log.warning("Received a AQ_DELETE_VOLUME_ERROR of a missing" | 966 | self.log.warning("Received a AQ_DELETE_VOLUME_ERROR of a missing" |
378 | 967 | "volume id") | 967 | "volume id") |
379 | @@ -1310,9 +1310,10 @@ | |||
380 | 1310 | if 'id' in share.__dict__: | 1310 | if 'id' in share.__dict__: |
381 | 1311 | share.volume_id = share.__dict__.pop('id') | 1311 | share.volume_id = share.__dict__.pop('id') |
382 | 1312 | if 'free_bytes' in share.__dict__: | 1312 | if 'free_bytes' in share.__dict__: |
384 | 1313 | free_bytes = share.__dict__.pop('free_bytes') | 1313 | # FIXME: REVIEWERS PLEASE CONFIRM THIS IS CORRECT |
385 | 1314 | share.free_bytes = share.__dict__.pop('free_bytes') | ||
386 | 1314 | else: | 1315 | else: |
388 | 1315 | free_bytes = None | 1316 | share.free_bytes = None |
389 | 1316 | return share | 1317 | return share |
390 | 1317 | # handle the root special case | 1318 | # handle the root special case |
391 | 1318 | if share.path == self._root_dir or share.id == '': | 1319 | if share.path == self._root_dir or share.id == '': |
392 | 1319 | 1320 | ||
393 | === modified file 'ubuntuone/u1sync/client.py' | |||
394 | --- ubuntuone/u1sync/client.py 2010-01-20 23:18:55 +0000 | |||
395 | +++ ubuntuone/u1sync/client.py 2010-02-10 17:40:25 +0000 | |||
396 | @@ -365,7 +365,7 @@ | |||
397 | 365 | lambda f: waiter.wake((None, None, f))) | 365 | lambda f: waiter.wake((None, None, f))) |
398 | 366 | else: | 366 | else: |
399 | 367 | waiter.wake((d, None, None)) | 367 | waiter.wake((d, None, None)) |
401 | 368 | except Exception, e: | 368 | except Exception: |
402 | 369 | waiter.wake((None, sys.exc_info(), None)) | 369 | waiter.wake((None, sys.exc_info(), None)) |
403 | 370 | 370 | ||
404 | 371 | self.reactor.callFromThread(runner) | 371 | self.reactor.callFromThread(runner) |
405 | 372 | 372 | ||
406 | === modified file 'ubuntuone/u1sync/main.py' | |||
407 | --- ubuntuone/u1sync/main.py 2010-01-20 22:56:50 +0000 | |||
408 | +++ ubuntuone/u1sync/main.py 2010-02-10 17:40:25 +0000 | |||
409 | @@ -442,7 +442,7 @@ | |||
410 | 442 | """Capture the exception from calling func.""" | 442 | """Capture the exception from calling func.""" |
411 | 443 | try: | 443 | try: |
412 | 444 | func() | 444 | func() |
414 | 445 | except Exception, e: | 445 | except Exception: |
415 | 446 | queue.put(sys.exc_info()) | 446 | queue.put(sys.exc_info()) |
416 | 447 | else: | 447 | else: |
417 | 448 | queue.put(None) | 448 | queue.put(None) |
Fix all lint warnings on lucid. There is one change I wasn't sure about; it is commented in the diff.