Merge lp:~lifeless/launchpad/memcache into lp:launchpad/db-devel

Proposed by Robert Collins on 2010-09-12
Status: Merged
Approved by: Michael Hudson-Doyle on 2010-09-12
Approved revision: no longer in the source branch.
Merged at revision: 9777
Proposed branch: lp:~lifeless/launchpad/memcache
Merge into: lp:launchpad/db-devel
Prerequisite: lp:~lifeless/launchpad/bug-631884
Diff against target: 119 lines (+53/-7)
3 files modified
lib/lp/services/memcache/client.py (+16/-1)
lib/lp/services/memcache/doc/tales-cache.txt (+33/-0)
lib/lp/services/memcache/tales.py (+4/-6)
To merge this branch: bzr merge lp:~lifeless/launchpad/memcache
Reviewer: Steve Kowalik (community)
Review type: code*
Date requested: 2010-09-12
Status: Approve on 2010-09-13
Review via email: mp+35220@code.launchpad.net

Commit Message

Permit disabling memcache via feature flags.

Description of the Change

Allow disabling memcache via feature flags. This permits easy experiments on pages that appear to spend a lot of time in memcache, and gives us a workaround if/when we find a bug in a memcache expression: we can work around it immediately rather than only after merging a code change.
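
In outline, the change guards every client call on the 'memcache' feature flag. The following is a minimal sketch of that guard, not the exact code: the class name is illustrative, and the real change (which also keeps the timeline recording and moves the set-failure logging into the client) is in the preview diff below.

    import memcache

    from lp.services import features


    class FlaggedClient(memcache.Client):
        """Illustrative only; the real branch patches the existing client."""

        @property
        def _enabled(self):
            # Any value other than 'disabled' (including an unset flag)
            # leaves memcache on, so default behaviour is unchanged.
            return features.getFeatureFlag('memcache') != 'disabled'

        def get(self, key):
            if not self._enabled:
                return None  # Behaves like a cache miss.
            return memcache.Client.get(self, key)

        def set(self, key, value, time=0, min_compress_len=0):
            if not self._enabled:
                return None  # Skip the write entirely.
            return memcache.Client.set(
                self, key, value, time=time, min_compress_len=min_compress_len)

Because an unset flag also counts as enabled, nothing changes until someone explicitly sets the 'memcache' flag to 'disabled'.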

Michael Hudson-Doyle (mwhudson) wrote:

Looks fine.

Steve Kowalik (stevenk) wrote:

Adding my conditional approval, based on Michael's review and my own reading of the diff.

review: Approve (code*)

Preview Diff

=== modified file 'lib/lp/services/memcache/client.py'
--- lib/lp/services/memcache/client.py 2010-09-04 05:59:16 +0000
+++ lib/lp/services/memcache/client.py 2010-09-12 05:59:48 +0000
@@ -6,12 +6,14 @@
 __metaclass__ = type
 __all__ = []
 
+import logging
 import re
 
 from lazr.restful.utils import get_current_browser_request
 import memcache
 
 from canonical.config import config
+from lp.services import features
 from lp.services.timeline.requesttimeline import get_request_timeline
 
 
@@ -32,7 +34,13 @@
         timeline = get_request_timeline(request)
         return timeline.start("memcache-%s" % suffix, key)
 
+    @property
+    def _enabled(self):
+        return features.getFeatureFlag('memcache') != 'disabled'
+
     def get(self, key):
+        if not self._enabled:
+            return None
         action = self.__get_timeline_action("get", key)
         try:
             return memcache.Client.get(self, key)
@@ -40,9 +48,16 @@
             action.finish()
 
     def set(self, key, value, time=0, min_compress_len=0):
+        if not self._enabled:
+            return None
         action = self.__get_timeline_action("set", key)
         try:
-            return memcache.Client.set(self, key, value, time=time,
+            success = memcache.Client.set(self, key, value, time=time,
                 min_compress_len=min_compress_len)
+            if success:
+                logging.debug("Memcache set succeeded for %s", key)
+            else:
+                logging.warn("Memcache set failed for %s", key)
+            return success
         finally:
             action.finish()

=== modified file 'lib/lp/services/memcache/doc/tales-cache.txt'
--- lib/lp/services/memcache/doc/tales-cache.txt 2010-06-24 17:59:02 +0000
+++ lib/lp/services/memcache/doc/tales-cache.txt 2010-09-12 05:59:48 +0000
@@ -343,3 +343,36 @@
 
     >>> ignore = config.pop('is_production')
 
+Disabling
+---------
+
+Memcache in templates can be disabled entirely by setting the memcache flag to
+'disabled'.
+
+    >>> from canonical.launchpad.webapp.servers import LaunchpadTestRequest
+    >>> from lp.services.features.model import FeatureFlag, getFeatureStore
+    >>> from lp.services.features.webapp import ScopesFromRequest
+    >>> from lp.services.features.flags import FeatureController
+    >>> from lp.services.features import per_thread
+    >>> ignore = getFeatureStore().add(FeatureFlag(
+    ...     scope=u'default', flag=u'memcache', value=u'disabled',
+    ...     priority=1))
+    >>> empty_request = LaunchpadTestRequest()
+    >>> per_thread.features = FeatureController(
+    ...     ScopesFromRequest(empty_request).lookup)
+
+And now what cached before will not cache.
+
+    >>> template = TestPageTemplate(dedent("""\
+    ...     <div tal:content="cache:public">
+    ...       <span tal:content="param">placeholder</span>
+    ...     </div>"""))
+
+    >>> print template(param='first')
+    <div>
+      <span>first</span>
+    </div>
+    >>> print template(param='second')
+    <div>
+      <span>second</span>
+    </div>

=== modified file 'lib/lp/services/memcache/tales.py'
--- lib/lp/services/memcache/tales.py 2010-08-20 20:31:18 +0000
+++ lib/lp/services/memcache/tales.py 2010-09-12 05:59:48 +0000
@@ -148,7 +148,9 @@
         + ''.join(chr(i) for i in range(ord(':')+1, 127)) + '_' * 129)
 
     # We strip digits from our LPCONFIG when generating the key
-    # to ensure that edge1 and edge4 share cache.
+    # to ensure that multiple appserver instances sharing a memcache instance
+    # can get hits from each other. For instance edge1 and edge4 are in this
+    # situation.
     _lpconfig = config.instance_name.rstrip('0123456789')
 
     def getKey(self, econtext):
@@ -278,11 +280,7 @@
         rule = '%s [%s seconds]' % (self._memcache_expr, self._max_age)
         value = "<!-- Cache hit: %s -->%s<!-- End cache hit: %s -->" % (
             rule, value, rule)
-        if getUtility(IMemcacheClient).set(
-            self._key, value, self._max_age):
-            logging.debug("Memcache set succeeded for %s", self._key)
-        else:
-            logging.warn("Memcache set failed for %s", self._key)
+        getUtility(IMemcacheClient).set(self._key, value, self._max_age)
 
     def __repr__(self):
         return "<MemcacheCallback %s %d>" % (self._key, self._max_age)

Subscribers

People subscribed via source and target branches

to status/vote changes: