Merge lp:~nchohan/appscale/appscale-1.6 into lp:appscale

Proposed by Navraj Chohan
Status: Merged
Merged at revision: 790
Proposed branch: lp:~nchohan/appscale/appscale-1.6
Merge into: lp:appscale
Diff against target: 225866 lines (+171892/-26939)
842 files modified
AppController/djinn.rb (+37/-22)
AppController/lib/djinn_job_data.rb (+5/-2)
AppController/lib/haproxy.rb (+6/-6)
AppController/lib/nginx.rb (+7/-7)
AppController/lib/pbserver.rb (+3/-3)
AppController/lib/rabbitmq.rb (+58/-0)
AppController/terminate.rb (+1/-0)
AppDB/appscale_server.py (+8/-3)
AppDB/appscale_server_mysql.py (+85/-20)
AppDB/cassandra/cassandra_helper.rb (+29/-7)
AppDB/cassandra/prime_cassandra.py (+13/-14)
AppDB/cassandra/py_cassandra.py (+64/-85)
AppDB/cassandra/templates/brisk (+16/-0)
AppDB/cassandra/templates/cassandra.yaml (+146/-73)
AppDB/cassandra/test_cassandra.py (+89/-286)
AppDB/hadoop/hadoop_helper.rb (+1/-1)
AppDB/hadoop/patch/hadoop-hbase.patch (+4/-2)
AppDB/hadoop/templates/hadoop (+468/-0)
AppDB/hbase/hbase-status.sh (+1/-1)
AppDB/hbase/hbase_helper.rb (+1/-1)
AppDB/hbase/patch/HMaster.java (+1171/-0)
AppDB/hbase/patch/HRegionServer.java (+2828/-0)
AppDB/hbase/patch/hbase-defaultip.patch (+0/-42)
AppDB/hbase/templates/hbase-site.xml (+2/-2)
AppDB/hypertable/hypertable_helper.rb (+2/-2)
AppDB/hypertable/py_hypertable.py (+9/-7)
AppDB/hypertable/templates/Capfile (+166/-153)
AppDB/hypertable/templates/hypertable.cfg (+0/-2)
AppDB/zkappscale/zktransaction.py (+3/-2)
AppDB/zkappscale/zookeeper_helper.rb (+1/-0)
AppLoadBalancer/app/views/layouts/main.html.erb (+2/-1)
AppLoadBalancer/app/views/status/cloud.html.erb (+2/-2)
AppLoadBalancer/app/views/users/login.html.erb (+2/-2)
AppLoadBalancer/app/views/users/new.html.erb (+2/-2)
AppLoadBalancer/public/stylesheets/bootstrap.min.css (+356/-0)
AppServer/README (+50/-33)
AppServer/RELEASE_NOTES (+429/-0)
AppServer/VERSION (+2/-2)
AppServer/appcfg.py (+29/-8)
AppServer/bulkload_client.py (+29/-8)
AppServer/bulkloader.py (+29/-8)
AppServer/dev_appserver.py (+29/-8)
AppServer/gen_protorpc.py (+99/-0)
AppServer/google/appengine/_internal/__init__.py (+16/-0)
AppServer/google/appengine/_internal/antlr3/__init__.py (+171/-0)
AppServer/google/appengine/_internal/antlr3/compat.py (+48/-0)
AppServer/google/appengine/_internal/antlr3/constants.py (+57/-0)
AppServer/google/appengine/_internal/antlr3/dfa.py (+213/-0)
AppServer/google/appengine/_internal/antlr3/dottreegen.py (+210/-0)
AppServer/google/appengine/_internal/antlr3/exceptions.py (+364/-0)
AppServer/google/appengine/_internal/antlr3/extras.py (+47/-0)
AppServer/google/appengine/_internal/antlr3/recognizers.py (+1474/-0)
AppServer/google/appengine/_internal/antlr3/streams.py (+1452/-0)
AppServer/google/appengine/_internal/antlr3/tokens.py (+416/-0)
AppServer/google/appengine/_internal/antlr3/tree.py (+2446/-0)
AppServer/google/appengine/_internal/antlr3/treewizard.py (+612/-0)
AppServer/google/appengine/_internal/django/__init__.py (+16/-0)
AppServer/google/appengine/_internal/django/conf/__init__.py (+120/-0)
AppServer/google/appengine/_internal/django/conf/app_template/models.py (+3/-0)
AppServer/google/appengine/_internal/django/conf/app_template/tests.py (+23/-0)
AppServer/google/appengine/_internal/django/conf/app_template/views.py (+1/-0)
AppServer/google/appengine/_internal/django/conf/global_settings.py (+524/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/LC_MESSAGES/django.po (+5002/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/LC_MESSAGES/djangojs.po (+145/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/formats.py (+38/-0)
AppServer/google/appengine/_internal/django/conf/project_template/manage.py (+11/-0)
AppServer/google/appengine/_internal/django/conf/project_template/settings.py (+96/-0)
AppServer/google/appengine/_internal/django/conf/project_template/urls.py (+16/-0)
AppServer/google/appengine/_internal/django/conf/urls/defaults.py (+42/-0)
AppServer/google/appengine/_internal/django/conf/urls/i18n.py (+5/-0)
AppServer/google/appengine/_internal/django/conf/urls/shortcut.py (+5/-0)
AppServer/google/appengine/_internal/django/core/cache/__init__.py (+79/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/base.py (+144/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/db.py (+145/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/dummy.py (+37/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/filebased.py (+171/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/locmem.py (+143/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/memcached.py (+104/-0)
AppServer/google/appengine/_internal/django/core/context_processors.py (+102/-0)
AppServer/google/appengine/_internal/django/core/exceptions.py (+87/-0)
AppServer/google/appengine/_internal/django/core/files/__init__.py (+1/-0)
AppServer/google/appengine/_internal/django/core/files/base.py (+134/-0)
AppServer/google/appengine/_internal/django/core/files/images.py (+62/-0)
AppServer/google/appengine/_internal/django/core/files/locks.py (+70/-0)
AppServer/google/appengine/_internal/django/core/files/move.py (+88/-0)
AppServer/google/appengine/_internal/django/core/files/storage.py (+244/-0)
AppServer/google/appengine/_internal/django/core/files/temp.py (+56/-0)
AppServer/google/appengine/_internal/django/core/files/uploadedfile.py (+128/-0)
AppServer/google/appengine/_internal/django/core/files/uploadhandler.py (+215/-0)
AppServer/google/appengine/_internal/django/core/files/utils.py (+29/-0)
AppServer/google/appengine/_internal/django/core/handlers/base.py (+220/-0)
AppServer/google/appengine/_internal/django/core/handlers/modpython.py (+228/-0)
AppServer/google/appengine/_internal/django/core/handlers/profiler-hotshot.py (+22/-0)
AppServer/google/appengine/_internal/django/core/handlers/wsgi.py (+265/-0)
AppServer/google/appengine/_internal/django/core/mail/__init__.py (+107/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/__init__.py (+1/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/base.py (+39/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/console.py (+37/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/dummy.py (+9/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/filebased.py (+59/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/locmem.py (+24/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/smtp.py (+109/-0)
AppServer/google/appengine/_internal/django/core/mail/message.py (+320/-0)
AppServer/google/appengine/_internal/django/core/mail/utils.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/__init__.py (+437/-0)
AppServer/google/appengine/_internal/django/core/management/base.py (+431/-0)
AppServer/google/appengine/_internal/django/core/management/color.py (+50/-0)
AppServer/google/appengine/_internal/django/core/management/commands/cleanup.py (+11/-0)
AppServer/google/appengine/_internal/django/core/management/commands/compilemessages.py (+61/-0)
AppServer/google/appengine/_internal/django/core/management/commands/createcachetable.py (+52/-0)
AppServer/google/appengine/_internal/django/core/management/commands/dbshell.py (+27/-0)
AppServer/google/appengine/_internal/django/core/management/commands/diffsettings.py (+32/-0)
AppServer/google/appengine/_internal/django/core/management/commands/dumpdata.py (+167/-0)
AppServer/google/appengine/_internal/django/core/management/commands/flush.py (+83/-0)
AppServer/google/appengine/_internal/django/core/management/commands/inspectdb.py (+167/-0)
AppServer/google/appengine/_internal/django/core/management/commands/loaddata.py (+240/-0)
AppServer/google/appengine/_internal/django/core/management/commands/makemessages.py (+321/-0)
AppServer/google/appengine/_internal/django/core/management/commands/reset.py (+57/-0)
AppServer/google/appengine/_internal/django/core/management/commands/runfcgi.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/runserver.py (+84/-0)
AppServer/google/appengine/_internal/django/core/management/commands/shell.py (+69/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sql.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlall.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlclear.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlcustom.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlflush.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlindexes.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlinitialdata.py (+7/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlreset.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlsequencereset.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/startapp.py (+47/-0)
AppServer/google/appengine/_internal/django/core/management/commands/startproject.py (+39/-0)
AppServer/google/appengine/_internal/django/core/management/commands/syncdb.py (+156/-0)
AppServer/google/appengine/_internal/django/core/management/commands/test.py (+40/-0)
AppServer/google/appengine/_internal/django/core/management/commands/testserver.py (+33/-0)
AppServer/google/appengine/_internal/django/core/management/commands/validate.py (+9/-0)
AppServer/google/appengine/_internal/django/core/management/sql.py (+182/-0)
AppServer/google/appengine/_internal/django/core/management/validation.py (+302/-0)
AppServer/google/appengine/_internal/django/core/paginator.py (+120/-0)
AppServer/google/appengine/_internal/django/core/serializers/__init__.py (+117/-0)
AppServer/google/appengine/_internal/django/core/serializers/base.py (+172/-0)
AppServer/google/appengine/_internal/django/core/serializers/json.py (+62/-0)
AppServer/google/appengine/_internal/django/core/serializers/python.py (+142/-0)
AppServer/google/appengine/_internal/django/core/serializers/pyyaml.py (+56/-0)
AppServer/google/appengine/_internal/django/core/serializers/xml_serializer.py (+295/-0)
AppServer/google/appengine/_internal/django/core/servers/basehttp.py (+719/-0)
AppServer/google/appengine/_internal/django/core/servers/fastcgi.py (+183/-0)
AppServer/google/appengine/_internal/django/core/signals.py (+5/-0)
AppServer/google/appengine/_internal/django/core/template_loader.py (+7/-0)
AppServer/google/appengine/_internal/django/core/urlresolvers.py (+396/-0)
AppServer/google/appengine/_internal/django/core/validators.py (+172/-0)
AppServer/google/appengine/_internal/django/core/xheaders.py (+24/-0)
AppServer/google/appengine/_internal/django/template/__init__.py (+1050/-0)
AppServer/google/appengine/_internal/django/template/context.py (+149/-0)
AppServer/google/appengine/_internal/django/template/debug.py (+101/-0)
AppServer/google/appengine/_internal/django/template/defaultfilters.py (+920/-0)
AppServer/google/appengine/_internal/django/template/defaulttags.py (+1162/-0)
AppServer/google/appengine/_internal/django/template/loader.py (+198/-0)
AppServer/google/appengine/_internal/django/template/loader_tags.py (+218/-0)
AppServer/google/appengine/_internal/django/template/loaders/app_directories.py (+74/-0)
AppServer/google/appengine/_internal/django/template/loaders/cached.py (+59/-0)
AppServer/google/appengine/_internal/django/template/loaders/eggs.py (+39/-0)
AppServer/google/appengine/_internal/django/template/loaders/filesystem.py (+61/-0)
AppServer/google/appengine/_internal/django/template/smartif.py (+206/-0)
AppServer/google/appengine/_internal/django/templatetags/cache.py (+63/-0)
AppServer/google/appengine/_internal/django/templatetags/i18n.py (+274/-0)
AppServer/google/appengine/_internal/django/utils/_os.py (+45/-0)
AppServer/google/appengine/_internal/django/utils/_threading_local.py (+240/-0)
AppServer/google/appengine/_internal/django/utils/autoreload.py (+119/-0)
AppServer/google/appengine/_internal/django/utils/cache.py (+228/-0)
AppServer/google/appengine/_internal/django/utils/checksums.py (+22/-0)
AppServer/google/appengine/_internal/django/utils/copycompat.py (+14/-0)
AppServer/google/appengine/_internal/django/utils/daemonize.py (+58/-0)
AppServer/google/appengine/_internal/django/utils/datastructures.py (+473/-0)
AppServer/google/appengine/_internal/django/utils/dateformat.py (+286/-0)
AppServer/google/appengine/_internal/django/utils/dates.py (+33/-0)
AppServer/google/appengine/_internal/django/utils/datetime_safe.py (+89/-0)
AppServer/google/appengine/_internal/django/utils/decorators.py (+90/-0)
AppServer/google/appengine/_internal/django/utils/encoding.py (+180/-0)
AppServer/google/appengine/_internal/django/utils/feedgenerator.py (+372/-0)
AppServer/google/appengine/_internal/django/utils/formats.py (+159/-0)
AppServer/google/appengine/_internal/django/utils/functional.py (+367/-0)
AppServer/google/appengine/_internal/django/utils/hashcompat.py (+20/-0)
AppServer/google/appengine/_internal/django/utils/html.py (+189/-0)
AppServer/google/appengine/_internal/django/utils/http.py (+130/-0)
AppServer/google/appengine/_internal/django/utils/importlib.py (+36/-0)
AppServer/google/appengine/_internal/django/utils/itercompat.py (+45/-0)
AppServer/google/appengine/_internal/django/utils/module_loading.py (+63/-0)
AppServer/google/appengine/_internal/django/utils/numberformat.py (+47/-0)
AppServer/google/appengine/_internal/django/utils/regex_helper.py (+328/-0)
AppServer/google/appengine/_internal/django/utils/safestring.py (+119/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/__init__.py (+349/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/decoder.py (+345/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/encoder.py (+430/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/scanner.py (+65/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/tool.py (+35/-0)
AppServer/google/appengine/_internal/django/utils/stopwords.py (+42/-0)
AppServer/google/appengine/_internal/django/utils/synch.py (+87/-0)
AppServer/google/appengine/_internal/django/utils/termcolors.py (+198/-0)
AppServer/google/appengine/_internal/django/utils/text.py (+282/-0)
AppServer/google/appengine/_internal/django/utils/thread_support.py (+12/-0)
AppServer/google/appengine/_internal/django/utils/timesince.py (+69/-0)
AppServer/google/appengine/_internal/django/utils/translation/__init__.py (+104/-0)
AppServer/google/appengine/_internal/django/utils/translation/trans_null.py (+72/-0)
AppServer/google/appengine/_internal/django/utils/translation/trans_real.py (+550/-0)
AppServer/google/appengine/_internal/django/utils/tree.py (+153/-0)
AppServer/google/appengine/_internal/django/utils/tzinfo.py (+77/-0)
AppServer/google/appengine/_internal/django/utils/version.py (+42/-0)
AppServer/google/appengine/_internal/django/utils/xmlutils.py (+14/-0)
AppServer/google/appengine/_internal/graphy/__init__.py (+1/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/__init__.py (+50/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/encoders.py (+430/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/util.py (+230/-0)
AppServer/google/appengine/_internal/graphy/bar_chart.py (+171/-0)
AppServer/google/appengine/_internal/graphy/common.py (+412/-0)
AppServer/google/appengine/_internal/graphy/formatters.py (+192/-0)
AppServer/google/appengine/_internal/graphy/line_chart.py (+122/-0)
AppServer/google/appengine/_internal/graphy/pie_chart.py (+178/-0)
AppServer/google/appengine/_internal/graphy/util.py (+13/-0)
AppServer/google/appengine/api/api_base_pb.py (+16/-0)
AppServer/google/appengine/api/apiproxy_stub.py (+5/-5)
AppServer/google/appengine/api/app_identity/__init__.py (+23/-0)
AppServer/google/appengine/api/app_identity/app_identity.py (+467/-0)
AppServer/google/appengine/api/app_identity/app_identity_service_pb.py (+1704/-0)
AppServer/google/appengine/api/app_identity/app_identity_stub.py (+140/-0)
AppServer/google/appengine/api/app_logging.py (+8/-9)
AppServer/google/appengine/api/appinfo.py (+289/-53)
AppServer/google/appengine/api/appinfo_errors.py (+48/-0)
AppServer/google/appengine/api/appinfo_includes.py (+27/-11)
AppServer/google/appengine/api/backendinfo.py (+227/-0)
AppServer/google/appengine/api/backends/__init__.py (+25/-0)
AppServer/google/appengine/api/backends/backends.py (+241/-0)
AppServer/google/appengine/api/blobstore/blobstore.py (+171/-32)
AppServer/google/appengine/api/blobstore/blobstore_service_pb.py (+368/-3)
AppServer/google/appengine/api/blobstore/dict_blob_storage.py (+63/-0)
AppServer/google/appengine/api/capabilities/capability_service_pb.py (+11/-0)
AppServer/google/appengine/api/capabilities/capability_stub.py (+13/-31)
AppServer/google/appengine/api/channel/channel_service_pb.py (+13/-2)
AppServer/google/appengine/api/channel/channel_service_stub.py (+194/-58)
AppServer/google/appengine/api/conf.py (+435/-0)
AppServer/google/appengine/api/conversion/__init__.py (+45/-0)
AppServer/google/appengine/api/conversion/conversion.py (+517/-0)
AppServer/google/appengine/api/conversion/conversion_service_pb.py (+1112/-0)
AppServer/google/appengine/api/conversion/conversion_stub.py (+163/-0)
AppServer/google/appengine/api/conversion/static/test.html (+11/-0)
AppServer/google/appengine/api/conversion/static/test.txt (+1/-0)
AppServer/google/appengine/api/datastore.py (+489/-261)
AppServer/google/appengine/api/datastore_admin.py (+0/-44)
AppServer/google/appengine/api/datastore_distributed.py (+2/-6)
AppServer/google/appengine/api/datastore_errors.py (+6/-3)
AppServer/google/appengine/api/datastore_file_stub.py (+219/-1004)
AppServer/google/appengine/api/datastore_types.py (+157/-17)
AppServer/google/appengine/api/files/__init__.py (+1/-0)
AppServer/google/appengine/api/files/blobstore.py (+69/-8)
AppServer/google/appengine/api/files/crc32c.py (+155/-0)
AppServer/google/appengine/api/files/file.py (+164/-111)
AppServer/google/appengine/api/files/file_service_pb.py (+1703/-247)
AppServer/google/appengine/api/files/file_service_stub.py (+242/-47)
AppServer/google/appengine/api/files/gs.py (+122/-0)
AppServer/google/appengine/api/files/records.py (+371/-0)
AppServer/google/appengine/api/files/testutil.py (+10/-4)
AppServer/google/appengine/api/images/__init__.py (+267/-30)
AppServer/google/appengine/api/images/images_service_pb.py (+362/-7)
AppServer/google/appengine/api/images/images_stub.py (+252/-25)
AppServer/google/appengine/api/lib_config.py (+138/-74)
AppServer/google/appengine/api/logservice/log_service_pb.py (+2895/-262)
AppServer/google/appengine/api/logservice/logservice.py (+804/-92)
AppServer/google/appengine/api/logservice/logservice_stub.py (+494/-0)
AppServer/google/appengine/api/logservice/logsutil.py (+82/-0)
AppServer/google/appengine/api/mail.py (+111/-6)
AppServer/google/appengine/api/mail_errors.py (+3/-0)
AppServer/google/appengine/api/mail_service_pb.py (+209/-3)
AppServer/google/appengine/api/memcache/__init__.py (+471/-138)
AppServer/google/appengine/api/memcache/memcache_service_pb.py (+28/-0)
AppServer/google/appengine/api/memcache/memcache_stub.py (+32/-9)
AppServer/google/appengine/api/prospective_search/error_pb.py (+10/-0)
AppServer/google/appengine/api/prospective_search/prospective_search.py (+2/-2)
AppServer/google/appengine/api/prospective_search/prospective_search_pb.py (+21/-0)
AppServer/google/appengine/api/prospective_search/prospective_search_stub.py (+13/-9)
AppServer/google/appengine/api/queueinfo.py (+6/-2)
AppServer/google/appengine/api/quota.py (+20/-8)
AppServer/google/appengine/api/rdbms.py (+15/-3)
AppServer/google/appengine/api/rdbms_mysqldb.py (+34/-4)
AppServer/google/appengine/api/rdbms_sqlite.py (+0/-65)
AppServer/google/appengine/api/runtime.py (+0/-72)
AppServer/google/appengine/api/runtime/__init__.py (+24/-0)
AppServer/google/appengine/api/runtime/runtime.py (+121/-0)
AppServer/google/appengine/api/search/ExpressionLexer.py (+1740/-0)
AppServer/google/appengine/api/search/ExpressionParser.py (+1622/-0)
AppServer/google/appengine/api/search/QueryLexer.py (+1679/-0)
AppServer/google/appengine/api/search/QueryParser.py (+3146/-0)
AppServer/google/appengine/api/search/__init__.py (+46/-0)
AppServer/google/appengine/api/search/expression_parser.py (+82/-0)
AppServer/google/appengine/api/search/query_parser.py (+113/-0)
AppServer/google/appengine/api/search/search.py (+2063/-0)
AppServer/google/appengine/api/search/search_service_pb.py (+4423/-0)
AppServer/google/appengine/api/search/simple_search_stub.py (+861/-0)
AppServer/google/appengine/api/system/__init__.py (+16/-0)
AppServer/google/appengine/api/system/system_service_pb.py (+769/-0)
AppServer/google/appengine/api/system/system_stub.py (+83/-0)
AppServer/google/appengine/api/taskqueue/__init__.py (+4/-1)
AppServer/google/appengine/api/taskqueue/taskqueue.py (+638/-98)
AppServer/google/appengine/api/taskqueue/taskqueue_rabbitmq.py (+786/-0)
AppServer/google/appengine/api/taskqueue/taskqueue_service_pb.py (+996/-67)
AppServer/google/appengine/api/taskqueue/taskqueue_stub.py (+1215/-296)
AppServer/google/appengine/api/urlfetch.py (+31/-3)
AppServer/google/appengine/api/urlfetch_errors.py (+7/-1)
AppServer/google/appengine/api/urlfetch_service_pb.py (+12/-0)
AppServer/google/appengine/api/user_service_pb.py (+22/-2)
AppServer/google/appengine/api/users.py (+3/-2)
AppServer/google/appengine/api/validation.py (+31/-9)
AppServer/google/appengine/api/xmpp/SOAPpy/Client.py (+0/-495)
AppServer/google/appengine/api/xmpp/SOAPpy/Config.py (+0/-202)
AppServer/google/appengine/api/xmpp/SOAPpy/Errors.py (+0/-79)
AppServer/google/appengine/api/xmpp/SOAPpy/GSIServer.py (+0/-143)
AppServer/google/appengine/api/xmpp/SOAPpy/NS.py (+0/-104)
AppServer/google/appengine/api/xmpp/SOAPpy/Parser.py (+0/-1067)
AppServer/google/appengine/api/xmpp/SOAPpy/SOAP.py (+0/-40)
AppServer/google/appengine/api/xmpp/SOAPpy/SOAPBuilder.py (+0/-636)
AppServer/google/appengine/api/xmpp/SOAPpy/Server.py (+0/-706)
AppServer/google/appengine/api/xmpp/SOAPpy/Types.py (+0/-1736)
AppServer/google/appengine/api/xmpp/SOAPpy/URLopener.py (+0/-23)
AppServer/google/appengine/api/xmpp/SOAPpy/Utilities.py (+0/-178)
AppServer/google/appengine/api/xmpp/SOAPpy/WSDL.py (+0/-119)
AppServer/google/appengine/api/xmpp/SOAPpy/__init__.py (+0/-16)
AppServer/google/appengine/api/xmpp/SOAPpy/version.py (+0/-2)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/Namespaces.py (+0/-124)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/TimeoutSocket.py (+0/-179)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/UserTuple.py (+0/-99)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/Utility.py (+0/-1348)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/WSDLTools.py (+0/-1602)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/XMLSchema.py (+0/-2879)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/XMLname.py (+0/-90)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/__init__.py (+0/-8)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/c14n.py (+0/-535)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/logging.py (+0/-84)
AppServer/google/appengine/api/xmpp/__init__.py (+24/-4)
AppServer/google/appengine/api/xmpp/xmpp_service_pb.py (+18/-0)
AppServer/google/appengine/api/xmpp/xmpp_service_stub.py (+2/-1)
AppServer/google/appengine/base/capabilities_pb.py (+11/-0)
AppServer/google/appengine/cron/GrocLexer.py (+148/-111)
AppServer/google/appengine/cron/GrocParser.py (+68/-68)
AppServer/google/appengine/datastore/datastore_index.py (+75/-11)
AppServer/google/appengine/datastore/datastore_pb.py (+1008/-709)
AppServer/google/appengine/datastore/datastore_query.py (+1641/-228)
AppServer/google/appengine/datastore/datastore_rpc.py (+501/-258)
AppServer/google/appengine/datastore/datastore_sqlite_stub.py (+224/-922)
AppServer/google/appengine/datastore/document_pb.py (+766/-0)
AppServer/google/appengine/datastore/entity_pb.py (+18/-0)
AppServer/google/appengine/dist/__init__.py (+2/-0)
AppServer/google/appengine/dist/_library.py (+58/-3)
AppServer/google/appengine/dist/httplib.py (+12/-3)
AppServer/google/appengine/dist27/__init__.py (+18/-0)
AppServer/google/appengine/dist27/httplib.py (+791/-0)
AppServer/google/appengine/dist27/urllib.py (+1641/-0)
AppServer/google/appengine/ext/admin/__init__.py (+830/-121)
AppServer/google/appengine/ext/admin/datastore_stats_generator.py (+327/-0)
AppServer/google/appengine/ext/admin/templates/backend.html (+45/-0)
AppServer/google/appengine/ext/admin/templates/backends.html (+78/-0)
AppServer/google/appengine/ext/admin/templates/base.html (+5/-1)
AppServer/google/appengine/ext/admin/templates/css/datastore.css (+6/-0)
AppServer/google/appengine/ext/admin/templates/css/datastore_indexes.css (+57/-0)
AppServer/google/appengine/ext/admin/templates/datastore.html (+5/-2)
AppServer/google/appengine/ext/admin/templates/datastore_edit.html (+2/-1)
AppServer/google/appengine/ext/admin/templates/datastore_indexes.html (+67/-0)
AppServer/google/appengine/ext/admin/templates/datastore_stats.html (+47/-0)
AppServer/google/appengine/ext/admin/templates/inboundmail.html (+1/-0)
AppServer/google/appengine/ext/admin/templates/interactive.html (+1/-0)
AppServer/google/appengine/ext/admin/templates/memcache.html (+3/-0)
AppServer/google/appengine/ext/admin/templates/queues.html (+4/-4)
AppServer/google/appengine/ext/admin/templates/search.html (+53/-0)
AppServer/google/appengine/ext/admin/templates/tasks.html (+22/-5)
AppServer/google/appengine/ext/admin/templates/xmpp.html (+1/-0)
AppServer/google/appengine/ext/appstats/datamodel_pb.py (+14/-0)
AppServer/google/appengine/ext/appstats/recording.py (+186/-95)
AppServer/google/appengine/ext/appstats/static/appstats_css.css (+1/-1)
AppServer/google/appengine/ext/appstats/static/appstats_js.js (+79/-81)
AppServer/google/appengine/ext/appstats/templates/details.html (+2/-4)
AppServer/google/appengine/ext/appstats/templates/main.html (+0/-1)
AppServer/google/appengine/ext/appstats/ui.py (+25/-29)
AppServer/google/appengine/ext/blobstore/blobstore.py (+57/-4)
AppServer/google/appengine/ext/builtins/__init__.py (+43/-27)
AppServer/google/appengine/ext/builtins/admin_redirect/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/builtins/appstats/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/builtins/datastore_admin/include-python27.yaml (+12/-0)
AppServer/google/appengine/ext/builtins/default/include-python27.yaml (+1/-0)
AppServer/google/appengine/ext/builtins/deferred/include-python27.yaml (+4/-0)
AppServer/google/appengine/ext/builtins/mapreduce/include-python27.yaml (+4/-0)
AppServer/google/appengine/ext/builtins/remote_api/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/datastore_admin/copy_handler.py (+62/-37)
AppServer/google/appengine/ext/datastore_admin/delete_handler.py (+6/-1)
AppServer/google/appengine/ext/datastore_admin/remote_api_put_stub.py (+1/-1)
AppServer/google/appengine/ext/datastore_admin/static/css/compiled.css (+1/-1)
AppServer/google/appengine/ext/datastore_admin/static/js/compiled.js (+18/-20)
AppServer/google/appengine/ext/datastore_admin/utils.py (+4/-3)
AppServer/google/appengine/ext/db/__init__.py (+374/-170)
AppServer/google/appengine/ext/db/djangoforms.py (+27/-8)
AppServer/google/appengine/ext/db/metadata.py (+123/-10)
AppServer/google/appengine/ext/db/polymodel.py (+17/-2)
AppServer/google/appengine/ext/db/stats.py (+121/-0)
AppServer/google/appengine/ext/deferred/deferred.py (+24/-4)
AppServer/google/appengine/ext/django/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/rdbms/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/rdbms/base.py (+27/-0)
AppServer/google/appengine/ext/django/management/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/management/commands/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/management/commands/getoauthtoken.py (+27/-0)
AppServer/google/appengine/ext/ereporter/report_generator.py (+8/-9)
AppServer/google/appengine/ext/gql/__init__.py (+56/-9)
AppServer/google/appengine/ext/key_range/__init__.py (+8/-5)
AppServer/google/appengine/ext/mapreduce/base_handler.py (+36/-3)
AppServer/google/appengine/ext/mapreduce/context.py (+1/-4)
AppServer/google/appengine/ext/mapreduce/control.py (+14/-7)
AppServer/google/appengine/ext/mapreduce/errors.py (+21/-0)
AppServer/google/appengine/ext/mapreduce/handlers.py (+148/-79)
AppServer/google/appengine/ext/mapreduce/input_readers.py (+294/-56)
AppServer/google/appengine/ext/mapreduce/main.py (+14/-2)
AppServer/google/appengine/ext/mapreduce/mapper_pipeline.py (+127/-0)
AppServer/google/appengine/ext/mapreduce/mapreduce_pipeline.py (+202/-0)
AppServer/google/appengine/ext/mapreduce/model.py (+44/-21)
AppServer/google/appengine/ext/mapreduce/output_writers.py (+325/-37)
AppServer/google/appengine/ext/mapreduce/shuffler.py (+555/-0)
AppServer/google/appengine/ext/mapreduce/static/base.css (+0/-14)
AppServer/google/appengine/ext/mapreduce/static/status.js (+44/-16)
AppServer/google/appengine/ext/mapreduce/status.py (+1/-2)
AppServer/google/appengine/ext/mapreduce/test_support.py (+150/-0)
AppServer/google/appengine/ext/mapreduce/util.py (+164/-3)
AppServer/google/appengine/ext/ndb/__init__.py (+3/-0)
AppServer/google/appengine/ext/ndb/context.py (+1086/-0)
AppServer/google/appengine/ext/ndb/eventloop.py (+262/-0)
AppServer/google/appengine/ext/ndb/key.py (+763/-0)
AppServer/google/appengine/ext/ndb/model.py (+2596/-0)
AppServer/google/appengine/ext/ndb/query.py (+1541/-0)
AppServer/google/appengine/ext/ndb/tasklets.py (+1016/-0)
AppServer/google/appengine/ext/ndb/test_utils.py (+87/-0)
AppServer/google/appengine/ext/ndb/utils.py (+101/-0)
AppServer/google/appengine/ext/remote_api/handler.py (+4/-3)
AppServer/google/appengine/ext/remote_api/remote_api_pb.py (+13/-0)
AppServer/google/appengine/ext/remote_api/remote_api_services.py (+80/-11)
AppServer/google/appengine/ext/remote_api/throttle.py (+7/-2)
AppServer/google/appengine/ext/search/__init__.py (+2/-2)
AppServer/google/appengine/ext/testbed/__init__.py (+209/-71)
AppServer/google/appengine/ext/webapp/__init__.py (+101/-727)
AppServer/google/appengine/ext/webapp/_template.py (+54/-0)
AppServer/google/appengine/ext/webapp/_webapp25.py (+792/-0)
AppServer/google/appengine/ext/webapp/blobstore_handlers.py (+65/-16)
AppServer/google/appengine/ext/webapp/template.py (+77/-14)
AppServer/google/appengine/runtime/apiproxy.py (+20/-0)
AppServer/google/appengine/runtime/cgi.py (+301/-0)
AppServer/google/appengine/runtime/request_environment.py (+152/-0)
AppServer/google/appengine/runtime/runtime.py (+184/-0)
AppServer/google/appengine/runtime/wsgi.py (+253/-0)
AppServer/google/appengine/tools/appcfg.py (+947/-310)
AppServer/google/appengine/tools/dev-channel-js.js (+535/-1187)
AppServer/google/appengine/tools/dev_appserver.py (+847/-1856)
AppServer/google/appengine/tools/dev_appserver_blobimage.py (+6/-2)
AppServer/google/appengine/tools/dev_appserver_blobstore.py (+177/-92)
AppServer/google/appengine/tools/dev_appserver_import_hook.py (+1814/-0)
AppServer/google/appengine/tools/dev_appserver_index.py (+35/-21)
AppServer/google/appengine/tools/dev_appserver_login.py (+0/-5)
AppServer/google/appengine/tools/dev_appserver_main.py (+300/-123)
AppServer/google/appengine/tools/dev_appserver_multiprocess.py (+1093/-0)
AppServer/google/appengine/tools/gen_protorpc.py (+300/-0)
AppServer/google/appengine/tools/os_compat.py (+2/-1)
AppServer/google/net/proto/ProtocolBuffer.py (+440/-6)
AppServer/google/net/proto2/proto/descriptor_pb2.py (+48/-33)
AppServer/google/net/proto2/python/internal/containers.py (+5/-0)
AppServer/google/net/proto2/python/internal/enum_type_wrapper.py (+40/-0)
AppServer/google/net/proto2/python/internal/python_message.py (+9/-0)
AppServer/google/net/proto2/python/public/descriptor.py (+81/-0)
AppServer/google/net/proto2/python/public/message.py (+12/-0)
AppServer/google/net/proto2/python/public/reflection.py (+22/-0)
AppServer/google/net/proto2/python/public/text_format.py (+145/-111)
AppServer/google/storage/speckle/proto/client_error_code_pb2.py (+195/-0)
AppServer/google/storage/speckle/proto/client_pb2.py (+426/-57)
AppServer/google/storage/speckle/proto/jdbc_type.py (+2/-2)
AppServer/google/storage/speckle/proto/sql_pb2.py (+146/-26)
AppServer/google/storage/speckle/python/api/constants/CLIENT.py (+53/-0)
AppServer/google/storage/speckle/python/api/constants/FIELD_TYPE.py (+57/-0)
AppServer/google/storage/speckle/python/api/constants/FLAG.py (+48/-0)
AppServer/google/storage/speckle/python/api/constants/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/api/converters.py (+188/-0)
AppServer/google/storage/speckle/python/api/rdbms.py (+383/-88)
AppServer/google/storage/speckle/python/api/rdbms_apiproxy.py (+9/-41)
AppServer/google/storage/speckle/python/api/rdbms_googleapi.py (+244/-0)
AppServer/google/storage/speckle/python/django/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/backend/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/backend/base.py (+240/-0)
AppServer/google/storage/speckle/python/django/backend/client.py (+48/-0)
AppServer/google/storage/speckle/python/django/backend/oauth2storage.py (+58/-0)
AppServer/google/storage/speckle/python/django/management/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/management/commands/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/management/commands/getoauthtoken.py (+75/-0)
AppServer/google/storage/speckle/python/tool/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/tool/google_sql.py (+229/-0)
AppServer/google_sql.py (+99/-0)
AppServer/lib/antlr3/OWNERS (+2/-5)
AppServer/lib/cacerts/urlfetch_cacerts.txt (+785/-1408)
AppServer/lib/enum/enum/LICENSE (+340/-0)
AppServer/lib/enum/enum/__init__.py (+239/-0)
AppServer/lib/enum/enum/test/test_enum.py (+547/-0)
AppServer/lib/enum/enum/test/tools.py (+28/-0)
AppServer/lib/google-api-python-client/LICENSE (+202/-0)
AppServer/lib/google-api-python-client/MANIFEST.in (+19/-0)
AppServer/lib/google-api-python-client/PKG-INFO (+17/-0)
AppServer/lib/google-api-python-client/README (+48/-0)
AppServer/lib/google-api-python-client/apiclient/anyjson.py (+32/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/buzz/future.json (+142/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/latitude/future.json (+81/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/moderator/future.json (+107/-0)
AppServer/lib/google-api-python-client/apiclient/discovery.py (+659/-0)
AppServer/lib/google-api-python-client/apiclient/errors.py (+99/-0)
AppServer/lib/google-api-python-client/apiclient/ext/appengine.py (+135/-0)
AppServer/lib/google-api-python-client/apiclient/ext/authtools.py (+159/-0)
AppServer/lib/google-api-python-client/apiclient/ext/django_orm.py (+56/-0)
AppServer/lib/google-api-python-client/apiclient/ext/file.py (+63/-0)
AppServer/lib/google-api-python-client/apiclient/http.py (+350/-0)
AppServer/lib/google-api-python-client/apiclient/mimeparse.py (+172/-0)
AppServer/lib/google-api-python-client/apiclient/model.py (+346/-0)
AppServer/lib/google-api-python-client/apiclient/oauth.py (+483/-0)
AppServer/lib/google-api-python-client/bin/enable-app-engine-project (+138/-0)
AppServer/lib/google-api-python-client/functional_tests/test_services.py (+288/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/PKG-INFO (+17/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/SOURCES.txt (+295/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/requires.txt (+3/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/top_level.txt (+3/-0)
AppServer/lib/google-api-python-client/oauth2client/appengine.py (+541/-0)
AppServer/lib/google-api-python-client/oauth2client/client.py (+771/-0)
AppServer/lib/google-api-python-client/oauth2client/clientsecrets.py (+113/-0)
AppServer/lib/google-api-python-client/oauth2client/django_orm.py (+114/-0)
AppServer/lib/google-api-python-client/oauth2client/file.py (+92/-0)
AppServer/lib/google-api-python-client/oauth2client/multistore_file.py (+367/-0)
AppServer/lib/google-api-python-client/oauth2client/tools.py (+154/-0)
AppServer/lib/google-api-python-client/runsamples.py (+101/-0)
AppServer/lib/google-api-python-client/runtests.py (+85/-0)
AppServer/lib/google-api-python-client/setpath.sh (+1/-0)
AppServer/lib/google-api-python-client/setup.cfg (+5/-0)
AppServer/lib/google-api-python-client/setup.py (+70/-0)
AppServer/lib/google-api-python-client/uritemplate/__init__.py (+147/-0)
AppServer/lib/grizzled/grizzled/CHANGELOG (+191/-0)
AppServer/lib/grizzled/grizzled/LICENSE (+31/-0)
AppServer/lib/grizzled/grizzled/README (+9/-0)
AppServer/lib/grizzled/grizzled/__init__.py (+23/-0)
AppServer/lib/grizzled/grizzled/cmdline.py (+89/-0)
AppServer/lib/grizzled/grizzled/collections/__init__.py (+21/-0)
AppServer/lib/grizzled/grizzled/collections/dict.py (+559/-0)
AppServer/lib/grizzled/grizzled/config.py (+973/-0)
AppServer/lib/grizzled/grizzled/db/__init__.py (+205/-0)
AppServer/lib/grizzled/grizzled/db/base.py (+980/-0)
AppServer/lib/grizzled/grizzled/db/dbgadfly.py (+185/-0)
AppServer/lib/grizzled/grizzled/db/dummydb.py (+81/-0)
AppServer/lib/grizzled/grizzled/db/mysql.py (+140/-0)
AppServer/lib/grizzled/grizzled/db/oracle.py (+160/-0)
AppServer/lib/grizzled/grizzled/db/postgresql.py (+227/-0)
AppServer/lib/grizzled/grizzled/db/sqlite.py (+141/-0)
AppServer/lib/grizzled/grizzled/db/sqlserver.py (+124/-0)
AppServer/lib/grizzled/grizzled/decorators.py (+181/-0)
AppServer/lib/grizzled/grizzled/exception.py (+52/-0)
AppServer/lib/grizzled/grizzled/file/__init__.py (+323/-0)
AppServer/lib/grizzled/grizzled/file/includer.py (+473/-0)
AppServer/lib/grizzled/grizzled/history.py (+530/-0)
AppServer/lib/grizzled/grizzled/io/__init__.py (+407/-0)
AppServer/lib/grizzled/grizzled/io/filelock.py (+202/-0)
AppServer/lib/grizzled/grizzled/log.py (+122/-0)
AppServer/lib/grizzled/grizzled/misc.py (+146/-0)
AppServer/lib/grizzled/grizzled/net/__init__.py (+103/-0)
AppServer/lib/grizzled/grizzled/net/ftp/__init__.py (+6/-0)
AppServer/lib/grizzled/grizzled/net/ftp/parse.py (+686/-0)
AppServer/lib/grizzled/grizzled/os.py (+386/-0)
AppServer/lib/grizzled/grizzled/proxy.py (+118/-0)
AppServer/lib/grizzled/grizzled/system.py (+244/-0)
AppServer/lib/grizzled/grizzled/test/README (+1/-0)
AppServer/lib/grizzled/grizzled/test/TestProxy.py (+71/-0)
AppServer/lib/grizzled/grizzled/test/collections/TestLRUDict.py (+108/-0)
AppServer/lib/grizzled/grizzled/test/config/TestConfiguration.py (+179/-0)
AppServer/lib/grizzled/grizzled/test/file/Test.py (+62/-0)
AppServer/lib/grizzled/grizzled/test/io/TestPushback.py (+56/-0)
AppServer/lib/grizzled/grizzled/test/misc/TestReadOnly.py (+58/-0)
AppServer/lib/grizzled/grizzled/test/net/ftp/TestFTPListParse.py (+180/-0)
AppServer/lib/grizzled/grizzled/test/system/Test.py (+56/-0)
AppServer/lib/grizzled/grizzled/test/test_helpers.py (+13/-0)
AppServer/lib/grizzled/grizzled/test/text/TestStr2Bool.py (+46/-0)
AppServer/lib/grizzled/grizzled/text/__init__.py (+217/-0)
AppServer/lib/httplib2/httplib2/LICENSE (+21/-0)
AppServer/lib/httplib2/httplib2/OWNERS (+5/-0)
AppServer/lib/httplib2/httplib2/__init__.py (+1529/-0)
AppServer/lib/httplib2/httplib2/cacerts.txt (+714/-0)
AppServer/lib/httplib2/httplib2/httplib2_test.py (+21/-0)
AppServer/lib/httplib2/httplib2/iri2uri.py (+110/-0)
AppServer/lib/httplib2/httplib2/sync_from_mercurial.sh (+49/-0)
AppServer/lib/httplib2/httplib2/test/brokensocket/socket.py (+1/-0)
AppServer/lib/httplib2/httplib2/test/functional/test_proxies.py (+88/-0)
AppServer/lib/httplib2/httplib2/test/miniserver.py (+100/-0)
AppServer/lib/httplib2/httplib2/test/smoke_test.py (+23/-0)
AppServer/lib/httplib2/httplib2/test/test_no_socket.py (+24/-0)
AppServer/lib/oauth2/oauth2/LICENSE (+21/-0)
AppServer/lib/oauth2/oauth2/OWNERS (+2/-0)
AppServer/lib/oauth2/oauth2/__init__.py (+858/-0)
AppServer/lib/prettytable/prettytable/LICENSE (+25/-0)
AppServer/lib/prettytable/prettytable/__init__.py (+625/-0)
AppServer/lib/protorpc/LICENSE (+202/-0)
AppServer/lib/protorpc/protorpc/definition.py (+275/-0)
AppServer/lib/protorpc/protorpc/descriptor.py (+699/-0)
AppServer/lib/protorpc/protorpc/generate.py (+127/-0)
AppServer/lib/protorpc/protorpc/generate_proto.py (+127/-0)
AppServer/lib/protorpc/protorpc/generate_python.py (+204/-0)
AppServer/lib/protorpc/protorpc/message_types.py (+26/-0)
AppServer/lib/protorpc/protorpc/messages.py (+1696/-0)
AppServer/lib/protorpc/protorpc/protobuf.py (+318/-0)
AppServer/lib/protorpc/protorpc/protojson.py (+207/-0)
AppServer/lib/protorpc/protorpc/protourlencode.py (+540/-0)
AppServer/lib/protorpc/protorpc/registry.py (+241/-0)
AppServer/lib/protorpc/protorpc/remote.py (+1211/-0)
AppServer/lib/protorpc/protorpc/static/base.html (+57/-0)
AppServer/lib/protorpc/protorpc/static/forms.html (+31/-0)
AppServer/lib/protorpc/protorpc/static/forms.js (+685/-0)
AppServer/lib/protorpc/protorpc/static/jquery-1.4.2.min.js (+154/-0)
AppServer/lib/protorpc/protorpc/static/jquery.json-2.2.min.js (+31/-0)
AppServer/lib/protorpc/protorpc/static/methods.html (+37/-0)
AppServer/lib/protorpc/protorpc/transport.py (+423/-0)
AppServer/lib/protorpc/protorpc/util.py (+359/-0)
AppServer/lib/protorpc/protorpc/webapp/__init__.py (+18/-0)
AppServer/lib/protorpc/protorpc/webapp/forms.py (+163/-0)
AppServer/lib/protorpc/protorpc/webapp/service_handlers.py (+842/-0)
AppServer/lib/protorpc/protorpc/wsgi/__init__.py (+16/-0)
AppServer/lib/protorpc/protorpc/wsgi/service.py (+204/-0)
AppServer/lib/protorpc/protorpc/wsgi/util.py (+109/-0)
AppServer/lib/python-gflags/AUTHORS (+2/-0)
AppServer/lib/python-gflags/ChangeLog (+41/-0)
AppServer/lib/python-gflags/LICENSE (+28/-0)
AppServer/lib/python-gflags/MANIFEST.in (+19/-0)
AppServer/lib/python-gflags/Makefile (+69/-0)
AppServer/lib/python-gflags/NEWS (+48/-0)
AppServer/lib/python-gflags/OWNERS (+1/-0)
AppServer/lib/python-gflags/PKG-INFO (+10/-0)
AppServer/lib/python-gflags/README (+23/-0)
AppServer/lib/python-gflags/debian/README (+7/-0)
AppServer/lib/python-gflags/debian/changelog (+36/-0)
AppServer/lib/python-gflags/debian/compat (+1/-0)
AppServer/lib/python-gflags/debian/control (+26/-0)
AppServer/lib/python-gflags/debian/copyright (+41/-0)
AppServer/lib/python-gflags/debian/docs (+2/-0)
AppServer/lib/python-gflags/debian/rules (+62/-0)
AppServer/lib/python-gflags/gflags.py (+2769/-0)
AppServer/lib/python-gflags/gflags2man.py (+544/-0)
AppServer/lib/python-gflags/gflags_validators.py (+187/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/PKG-INFO (+10/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/SOURCES.txt (+30/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/top_level.txt (+2/-0)
AppServer/lib/python-gflags/setup.cfg (+5/-0)
AppServer/lib/python-gflags/setup.py (+44/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_bar.py (+135/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_baz.py (+45/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_foo.py (+141/-0)
AppServer/lib/python-gflags/tests/gflags_googletest.py (+109/-0)
AppServer/lib/python-gflags/tests/gflags_helpxml_test.py (+535/-0)
AppServer/lib/python-gflags/tests/gflags_unittest.py (+1866/-0)
AppServer/lib/python-gflags/tests/gflags_validators_test.py (+220/-0)
AppServer/lib/simplejson/simplejson/__init__.py (+248/-120)
AppServer/lib/simplejson/simplejson/decoder.py (+159/-72)
AppServer/lib/simplejson/simplejson/encoder.py (+111/-42)
AppServer/lib/simplejson/simplejson/scanner.py (+18/-7)
AppServer/lib/sqlcmd/sqlcmd/LICENSE (+30/-0)
AppServer/lib/sqlcmd/sqlcmd/README (+19/-0)
AppServer/lib/sqlcmd/sqlcmd/__init__.py (+1871/-0)
AppServer/lib/sqlcmd/sqlcmd/config.py (+274/-0)
AppServer/lib/sqlcmd/sqlcmd/ecmd.py (+161/-0)
AppServer/lib/sqlcmd/sqlcmd/exception.py (+77/-0)
AppServer/lib/sqlcmd/sqlcmd/sqlcmd_test.py (+18/-0)
AppServer/lib/webapp2/AUTHORS (+28/-0)
AppServer/lib/webapp2/CHANGES (+615/-0)
AppServer/lib/webapp2/LICENSE (+321/-0)
AppServer/lib/webapp2/MANIFEST.in (+13/-0)
AppServer/lib/webapp2/Makefile (+3/-0)
AppServer/lib/webapp2/PKG-INFO (+51/-0)
AppServer/lib/webapp2/README (+7/-0)
AppServer/lib/webapp2/TODO (+66/-0)
AppServer/lib/webapp2/docs/Makefile (+130/-0)
AppServer/lib/webapp2/docs/_static/README (+1/-0)
AppServer/lib/webapp2/docs/_templates/README (+1/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/layout.html (+224/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/pygapp2.py (+57/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/static/gcode.css (+1965/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/static/webapp2.css (+258/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/theme.conf (+11/-0)
AppServer/lib/webapp2/docs/api/extras.config.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.i18n.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.jinja2.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.json.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.local.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.local_app.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.mako.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.protorpc.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.routes.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.securecookie.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.security.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions_memcache.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions_ndb.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.users.rst (+3/-0)
AppServer/lib/webapp2/docs/api/index.rst (+10/-0)
AppServer/lib/webapp2/docs/api/webapp2.rst (+152/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/auth/models.rst (+27/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/sessions_memcache.rst (+7/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/sessions_ndb.rst (+13/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/users.rst (+9/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/auth.rst (+22/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/config.rst (+17/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/i18n.rst (+59/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/jinja2.rst (+30/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/json.rst (+25/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/local.rst (+10/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/local_app.rst (+12/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/mako.rst (+30/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/protorpc.rst (+25/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/routes.rst (+24/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/securecookie.rst (+12/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/security.rst (+17/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/sessions.rst (+30/-0)
AppServer/lib/webapp2/docs/conf.py (+257/-0)
AppServer/lib/webapp2/docs/features.rst (+289/-0)
AppServer/lib/webapp2/docs/guide/app.rst (+275/-0)
AppServer/lib/webapp2/docs/guide/exceptions.rst (+139/-0)
AppServer/lib/webapp2/docs/guide/extras.rst (+64/-0)
AppServer/lib/webapp2/docs/guide/handlers.rst (+283/-0)
AppServer/lib/webapp2/docs/guide/index.rst (+9/-0)
AppServer/lib/webapp2/docs/guide/request.rst (+211/-0)
AppServer/lib/webapp2/docs/guide/response.rst (+128/-0)
AppServer/lib/webapp2/docs/guide/routing.rst (+392/-0)
AppServer/lib/webapp2/docs/guide/testing.rst (+118/-0)
AppServer/lib/webapp2/docs/index.rst (+205/-0)
AppServer/lib/webapp2/docs/make.bat (+155/-0)
AppServer/lib/webapp2/docs/todo.rst (+178/-0)
AppServer/lib/webapp2/docs/tutorials/auth.rst (+81/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/devenvironment.rst (+50/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/handlingforms.rst (+80/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/helloworld.rst (+128/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/index.rst (+30/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/introduction.rst (+33/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/staticfiles.rst (+79/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/templates.rst (+101/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/uploading.rst (+69/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingdatastore.rst (+341/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingusers.rst (+83/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingwebapp2.rst (+97/-0)
AppServer/lib/webapp2/docs/tutorials/i18n.rst (+205/-0)
AppServer/lib/webapp2/docs/tutorials/index.rst (+10/-0)
AppServer/lib/webapp2/docs/tutorials/installing.packages.rst (+62/-0)
AppServer/lib/webapp2/docs/tutorials/marketplace.single.signon.rst (+6/-0)
AppServer/lib/webapp2/docs/tutorials/quickstart.nogae.rst (+101/-0)
AppServer/lib/webapp2/docs/tutorials/quickstart.rst (+81/-0)
AppServer/lib/webapp2/docs/tutorials/virtualenv.rst (+49/-0)
AppServer/lib/webapp2/run_tests.py (+55/-0)
AppServer/lib/webapp2/setup.cfg (+5/-0)
AppServer/lib/webapp2/setup.py (+68/-0)
AppServer/lib/webapp2/tests/extras_appengine_auth_models_test.py (+196/-0)
AppServer/lib/webapp2/tests/extras_appengine_sessions_memcache_test.py (+136/-0)
AppServer/lib/webapp2/tests/extras_appengine_sessions_ndb_test.py (+172/-0)
AppServer/lib/webapp2/tests/extras_appengine_users_test.py (+98/-0)
AppServer/lib/webapp2/tests/extras_auth_test.py (+299/-0)
AppServer/lib/webapp2/tests/extras_config_test.py (+347/-0)
AppServer/lib/webapp2/tests/extras_i18n_test.py (+399/-0)
AppServer/lib/webapp2/tests/extras_jinja2_test.py (+96/-0)
AppServer/lib/webapp2/tests/extras_json_test.py (+37/-0)
AppServer/lib/webapp2/tests/extras_local_app_test.py (+20/-0)
AppServer/lib/webapp2/tests/extras_mako_test.py (+45/-0)
AppServer/lib/webapp2/tests/extras_protorpc_test.py (+196/-0)
AppServer/lib/webapp2/tests/extras_routes_test.py (+289/-0)
AppServer/lib/webapp2/tests/extras_securecookie_test.py (+46/-0)
AppServer/lib/webapp2/tests/extras_security_test.py (+61/-0)
AppServer/lib/webapp2/tests/extras_sessions_test.py (+219/-0)
AppServer/lib/webapp2/tests/handler_test.py (+682/-0)
AppServer/lib/webapp2/tests/misc_test.py (+130/-0)
AppServer/lib/webapp2/tests/request_test.py (+307/-0)
AppServer/lib/webapp2/tests/resources/__init__.py (+1/-0)
AppServer/lib/webapp2/tests/resources/handlers.py (+15/-0)
AppServer/lib/webapp2/tests/resources/i18n.py (+14/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/hello.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template1.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template2.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template3.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates_compiled/tmpl_3a79873b1b49be244fd5444b1258ce348be26de8.py (+11/-0)
AppServer/lib/webapp2/tests/resources/mako_templates/template1.html (+1/-0)
AppServer/lib/webapp2/tests/resources/protorpc_services.py (+26/-0)
AppServer/lib/webapp2/tests/resources/template.py (+3/-0)
AppServer/lib/webapp2/tests/response_test.py (+331/-0)
AppServer/lib/webapp2/tests/routing_test.py (+314/-0)
AppServer/lib/webapp2/tests/test_base.py (+86/-0)
AppServer/lib/webapp2/tests/webapp1_test.py (+126/-0)
AppServer/lib/webapp2/webapp2.egg-info/PKG-INFO (+51/-0)
AppServer/lib/webapp2/webapp2.egg-info/SOURCES.txt (+150/-0)
AppServer/lib/webapp2/webapp2.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/webapp2/webapp2.egg-info/not-zip-safe (+1/-0)
AppServer/lib/webapp2/webapp2.egg-info/top_level.txt (+2/-0)
AppServer/lib/webapp2/webapp2.py (+1960/-0)
AppServer/lib/webapp2/webapp2_extras/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/auth/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/auth/models.py (+390/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/sessions_memcache.py (+51/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/sessions_ndb.py (+120/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/users.py (+70/-0)
AppServer/lib/webapp2/webapp2_extras/auth.py (+644/-0)
AppServer/lib/webapp2/webapp2_extras/config.py (+228/-0)
AppServer/lib/webapp2/webapp2_extras/i18n.py (+915/-0)
AppServer/lib/webapp2/webapp2_extras/jinja2.py (+230/-0)
AppServer/lib/webapp2/webapp2_extras/json.py (+108/-0)
AppServer/lib/webapp2/webapp2_extras/local.py (+231/-0)
AppServer/lib/webapp2/webapp2_extras/local_app.py (+24/-0)
AppServer/lib/webapp2/webapp2_extras/mako.py (+136/-0)
AppServer/lib/webapp2/webapp2_extras/protorpc.py (+215/-0)
AppServer/lib/webapp2/webapp2_extras/routes.py (+353/-0)
AppServer/lib/webapp2/webapp2_extras/securecookie.py (+100/-0)
AppServer/lib/webapp2/webapp2_extras/security.py (+218/-0)
AppServer/lib/webapp2/webapp2_extras/sessions.py (+476/-0)
AppServer/lib/webapp2/webapp2_extras/sessions_memcache.py (+20/-0)
AppServer/lib/webapp2/webapp2_extras/sessions_ndb.py (+20/-0)
AppServer/lib/webapp2/webapp2_extras/users.py (+20/-0)
AppServer/new_project_template/app.yaml (+4/-0)
AppServer/remote_api_shell.py (+29/-8)
AppServer/templates/logging_console.js (+0/-257)
AppServer/templates/logging_console_footer.html (+0/-4)
AppServer/templates/logging_console_header.html (+0/-71)
AppServer/templates/logging_console_middle.html (+0/-4)
AppServer_Java/src/com/google/appengine/api/blobstore/BlobstoreServiceImpl.java (+6/-6)
AppServer_Java/src/com/google/appengine/api/blobstore/dev/DatastoreBlobStorage.java (+86/-50)
AppServer_Java/src/com/google/appengine/api/blobstore/dev/LocalBlobstoreService.java (+5/-5)
AppServer_Java/src/com/google/appengine/api/datastore/dev/HTTPClientDatastoreProxy.java (+39/-24)
AppServer_Java/src/com/google/appengine/api/datastore/dev/LocalDatastoreService.java (+47/-49)
AppServer_Java/src/com/google/appengine/api/memcache/dev/LocalMemcacheService.java (+13/-13)
AppServer_Java/src/com/google/appengine/api/users/dev/LocalLoginServlet.java (+6/-6)
AppServer_Java/src/com/google/appengine/api/users/dev/LoginCookieUtils.java (+10/-11)
LICENSE (+6/-0)
RELEASE (+10/-1)
VERSION (+7/-1)
debian/appscale_build.sh (+11/-3)
debian/appscale_install.sh (+6/-0)
debian/appscale_install_functions.sh (+76/-190)
debian/control.core.lucid (+3/-0)
To merge this branch: bzr merge lp:~nchohan/appscale/appscale-1.6
Reviewer Review Type Date Requested Status
Chris Bunch Approve
Navraj Chohan (community) Needs Resubmitting
Review via email: mp+93084@code.launchpad.net

Description of the change

Tested on 1 node for Cassandra, HBase, Hypertable, MySQL, Redis, MongoDB
Tested on 3 nodes for the same.
Tested with multiple applications with Java/Go/Python.

New items:
RabbitMQ for the task queue system.
HBase 0.90.4
Cassandra 1.0.7
Hypertable 0.9.5.5

GAE 1.6.1 for python and go

Some bug fixes

To post a comment you must log in.
Revision history for this message
Navraj Chohan (nchohan) wrote :

Updated hadoop to 0.20.2-cdh3u3. HBase is also cdh3u3.

Fixed a security issue with task queue requests.

Revision history for this message
Chris Bunch (cgb-cs) wrote :

Testing now - works on a simple 2-node Cassandra deployment with the pipeline-test sample app. Continuing with other apps / deployments.

Revision history for this message
Chris Bunch (cgb-cs) wrote :

A few style comments:

In rabbitmq.rb:

1) On line 11, remove the trailing space.

2) On line 15, shouldn't the semi-colons be double ampersands? Don't you want to only exec the second command if the first succeeds?

3) Lines 20-21 are commented out since you're not using god to manage rabbitmq - thus, remove those and 16-18, since you're defining variables that you're not using.

4) On 11, 25, and 54, you don't need to pass in the secret - functions can get it with HelperFunctions.get_secret()

5) On line 30, extract the rabbitmq port to a global constant

6) Remove line 41, since you don't use it.

7) Remove line 50

8) Remove line 63

review: Needs Fixing
Revision history for this message
Chris Bunch (cgb-cs) wrote :

More style stuff:

djinn_job_data, nginx, and pbserver look good to me

In haproxy.rb, you've got these lines:

+ # Timeout a request if Mongrel does not accept a connection for 60 seconds
+ timeout connect 60000

which seem to indicate that the value is msec - 60000 msec -> 60 sec

but this doesn't seem to hold for the other vals:

+ # Timeout a request if the client did not read any data for 240 seconds
+ timeout client 60000

why isn't the above 60 seconds as well?

In terminate.rb, this whole block:

+#begin
+# require 'rabbitmq'
+# RabbitMQ.stop
+#rescue Execption
+# puts "Problem with rabbitmq, moving on"
+#end
+

is commented out, so just delete it.

In djinn.rb:

1) Remove "Djinn.rb" from all log_debug statements, since it isn't consistently applied (and other files don't identify themselves) and isn't standard for us.

review: Needs Fixing
Revision history for this message
Navraj Chohan (nchohan) wrote :

Fixes committed.

review: Needs Resubmitting
Revision history for this message
Chris Bunch (cgb-cs) :
review: Approve
lp:~nchohan/appscale/appscale-1.6 updated
790. By Navraj Chohan

RabbitMQ for the task queue system. HBase 0.90.4, Cassandra 1.0.7, Hypertable 0.9.5.5, GAE 1.6.1 for python and go, and other bug fixes

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'AppController/djinn.rb'
--- AppController/djinn.rb 2012-02-01 19:17:26 +0000
+++ AppController/djinn.rb 2012-02-16 06:02:21 +0000
@@ -22,6 +22,7 @@
22require 'nginx'22require 'nginx'
23require 'pbserver'23require 'pbserver'
24require 'blobstore'24require 'blobstore'
25require 'rabbitmq'
25require 'app_controller_client'26require 'app_controller_client'
26require 'user_app_client'27require 'user_app_client'
27require 'ejabberd'28require 'ejabberd'
@@ -188,7 +189,7 @@
188 return "Error: Credential format wrong"189 return "Error: Credential format wrong"
189 end190 end
190191
191 Djinn.log_debug("Parameters were valid")192 Djinn.log_debug("Djinn (set_parameters) Parameters were valid")
192193
193 keyname = possible_credentials["keyname"]194 keyname = possible_credentials["keyname"]
194 @nodes = Djinn.convert_location_array_to_class(djinn_locations, keyname)195 @nodes = Djinn.convert_location_array_to_class(djinn_locations, keyname)
@@ -198,15 +199,15 @@
198 convert_fqdns_to_ips199 convert_fqdns_to_ips
199 @creds = sanitize_credentials200 @creds = sanitize_credentials
200201
201 Djinn.log_debug("Djinn locations: #{@nodes.join(', ')}")202 Djinn.log_debug("Djinn (set_parameters) locations: #{@nodes.join(', ')}")
202 Djinn.log_debug("DB Credentials: #{HelperFunctions.obscure_creds(@creds).inspect}")203 Djinn.log_debug("Djinn (set_parameters) DB Credentials: #{HelperFunctions.obscure_creds(@creds).inspect}")
203 Djinn.log_debug("Apps to load: #{@app_names.join(', ')}")204 Djinn.log_debug("Apps to load: #{@app_names.join(', ')}")
204205
205 find_me_in_locations206 find_me_in_locations
206 if @my_index == nil207 if @my_index == nil
207 return "Error: Couldn't find me in the node map"208 return "Error: Couldn't find me in the node map"
208 end209 end
209 Djinn.log_debug("My index = #{@my_index}")210 Djinn.log_debug("Djinn (set_parameters) My index = #{@my_index}")
210211
211 ENV['EC2_URL'] = @creds['ec2_url']212 ENV['EC2_URL'] = @creds['ec2_url']
212 213
@@ -223,7 +224,7 @@
223 end224 end
224225
225 @app_names = app_names226 @app_names = app_names
226 return "app names is now #{@app_names.join(', ')}"227 return "App names is now #{@app_names.join(', ')}"
227 end228 end
228 229
229 def status(secret)230 def status(secret)
@@ -284,8 +285,9 @@
284 def stop_app(app_name, secret)285 def stop_app(app_name, secret)
285 return BAD_SECRET_MSG unless valid_secret?(secret)286 return BAD_SECRET_MSG unless valid_secret?(secret)
286 app_name.gsub!(/[^\w\d\-]/, "")287 app_name.gsub!(/[^\w\d\-]/, "")
287 Djinn.log_debug("Shutting down app named [#{app_name}]")288 Djinn.log_debug("Djinn (stop_app): Shutting down app named [#{app_name}]")
288 result = ""289 result = ""
290 Djinn.log_run("rm -rf /var/apps/#{app_name}")
289 291
290 # app shutdown process can take more than 30 seconds292 # app shutdown process can take more than 30 seconds
291 # so run it in a new thread to avoid 'execution expired'293 # so run it in a new thread to avoid 'execution expired'
@@ -299,7 +301,7 @@
299 ip = node.private_ip301 ip = node.private_ip
300 acc = AppControllerClient.new(ip, @@secret)302 acc = AppControllerClient.new(ip, @@secret)
301 result = acc.stop_app(app_name)303 result = acc.stop_app(app_name)
302 Djinn.log_debug("Removing application #{app_name} --- #{ip} returned #{result} (#{result.class})")304 Djinn.log_debug("Djinn (stop_app): Removing application #{app_name} --- #{ip} returned #{result} (#{result.class})")
303 end305 end
304 }306 }
305 end307 end
@@ -309,7 +311,7 @@
309 ip = HelperFunctions.read_file("#{APPSCALE_HOME}/.appscale/masters")311 ip = HelperFunctions.read_file("#{APPSCALE_HOME}/.appscale/masters")
310 uac = UserAppClient.new(ip, @@secret)312 uac = UserAppClient.new(ip, @@secret)
311 result = uac.delete_app(app_name)313 result = uac.delete_app(app_name)
312 Djinn.log_debug("Delete app: #{ip} returned #{result} (#{result.class})")314 Djinn.log_debug("Djinn (stop_app) Delete app: #{ip} returned #{result} (#{result.class})")
313 end315 end
314 316
315 # may need to stop XMPP listener317 # may need to stop XMPP listener
@@ -364,7 +366,7 @@
364 ip = @nodes[index].private_ip366 ip = @nodes[index].private_ip
365 acc = AppControllerClient.new(ip, @@secret)367 acc = AppControllerClient.new(ip, @@secret)
366 result = acc.set_apps(apps)368 result = acc.set_apps(apps)
367 Djinn.log_debug("#{ip} returned #{result} (#{result.class})")369 Djinn.log_debug("djinn.rb:update #{ip} returned #{result} (#{result.class})")
368 @everyone_else_is_done = false if !result370 @everyone_else_is_done = false if !result
369 }371 }
370372
@@ -706,16 +708,16 @@
706 infrastructure = @creds["infrastructure"] 708 infrastructure = @creds["infrastructure"]
707 709
708 if is_hybrid_cloud?710 if is_hybrid_cloud?
709 Djinn.log_debug("Getting hybrid ips with creds #{@creds.inspect}")711 Djinn.log_debug("Djinn (set_uaserver_ips): Getting hybrid ips with creds #{@creds.inspect}")
710 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)712 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)
711 else713 else
712 Djinn.log_debug("Getting cloud ips for #{infrastructure} with keyname #{keyname}")714 Djinn.log_debug("Djinn (set_uaserver_ips): Getting cloud ips for #{infrastructure} with keyname #{keyname}")
713 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)715 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)
714 end716 end
715 717
716 Djinn.log_debug("Public ips are #{public_ips.join(', ')}")718 Djinn.log_debug("Djinn (set_uaserver_ips): Public ips are #{public_ips.join(', ')}")
717 Djinn.log_debug("Private ips are #{private_ips.join(', ')}")719 Djinn.log_debug("Djinn (set_uaserver_ips): Private ips are #{private_ips.join(', ')}")
718 Djinn.log_debug("Looking for #{ip_addr}")720 Djinn.log_debug("Djinn (set_uaserver_ips): Looking for #{ip_addr}")
719721
720 public_ips.each_index { |index|722 public_ips.each_index { |index|
721 node_public_ip = HelperFunctions.convert_fqdn_to_ip(public_ips[index])723 node_public_ip = HelperFunctions.convert_fqdn_to_ip(public_ips[index])
@@ -747,16 +749,16 @@
747 infrastructure = @creds["infrastructure"] 749 infrastructure = @creds["infrastructure"]
748750
749 if is_hybrid_cloud?751 if is_hybrid_cloud?
750 Djinn.log_debug("Getting hybrid ips with creds #{@creds.inspect}")752 Djinn.log_debug("Djinn (set_uaserver_ips): Getting hybrid ips with creds #{@creds.inspect}")
751 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)753 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)
752 else754 else
753 Djinn.log_debug("Getting cloud ips for #{infrastructure} with keyname #{keyname}")755 Djinn.log_debug("Djinn (set_uaserver_ips): Getting cloud ips for #{infrastructure} with keyname #{keyname}")
754 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)756 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)
755 end757 end
756758
757 Djinn.log_debug("Public ips are #{public_ips.join(', ')}")759 Djinn.log_debug("Djinn (set_uaserver_ips): Public ips are #{public_ips.join(', ')}")
758 Djinn.log_debug("Private ips are #{private_ips.join(', ')}")760 Djinn.log_debug("Djinn (set_uaserver_ips): Private ips are #{private_ips.join(', ')}")
759 Djinn.log_debug("Looking for #{private_ip}")761 Djinn.log_debug("Djinn (set_uaserver_ips): Looking for #{private_ip}")
760762
761 public_ips.each_index { |index|763 public_ips.each_index { |index|
762 node_private_ip = HelperFunctions.convert_fqdn_to_ip(private_ips[index])764 node_private_ip = HelperFunctions.convert_fqdn_to_ip(private_ips[index])
@@ -874,7 +876,7 @@
874 neptune_job_data=NeptuneJobData)876 neptune_job_data=NeptuneJobData)
875877
876 if !file.exists?(file_to_load)878 if !file.exists?(file_to_load)
877 djinn.log_debug("No neptune data found - no need to restore")879 djinn.log_debug("Djinn (load_neptune_info) No neptune data found - no need to restore")
878 return880 return
879 end881 end
880882
@@ -962,11 +964,11 @@
962 response = Net::HTTP.get_response(URI.parse(repo_url))964 response = Net::HTTP.get_response(URI.parse(repo_url))
963 data = JSON.load(response.body)965 data = JSON.load(response.body)
964 rescue Exception => e966 rescue Exception => e
965 Djinn.log_debug("Update API status saw exception #{e.class}")967 Djinn.log_debug("EXCEPTION for update_api_status connecting to the repo localhost:8079: #{e.class}. Check to see if the repo is up.")
966 data = {}968 data = {}
967 end969 end
968970
969 Djinn.log_debug("Data received is #{data.inspect}")971 Djinn.log_debug("Djinn (update_api_status) Data received for API status is #{data.inspect}")
970972
971 majorities = {}973 majorities = {}
972974
@@ -1228,8 +1230,10 @@
1228 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, UA_SERVER_PORT)1230 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, UA_SERVER_PORT)
1229 end1231 end
12301232
1233
1231 start_blobstore_server if my_node.is_appengine?1234 start_blobstore_server if my_node.is_appengine?
12321235
1236 start_rabbitmq_server
1233 # for neptune jobs, start a place where they can save output to1237 # for neptune jobs, start a place where they can save output to
1234 # also, since repo does health checks on the app engine apis, start it up there too1238 # also, since repo does health checks on the app engine apis, start it up there too
12351239
@@ -1259,6 +1263,17 @@
1259 BlobServer.is_running(db_local_ip)1263 BlobServer.is_running(db_local_ip)
1260 end1264 end
12611265
1266 def start_rabbitmq_server
1267 # The master is run on the shadow, and all slaves are on
1268 # app engine nodes
1269 if my_node.is_shadow?
1270 # The secret provides a unique identifier for rabbitmq
1271 RabbitMQ.start_master()
1272 elsif my_node.is_appengine? or my_node.is_rabbitmq_slave?
1273 # All slaves connect to the master to start
1274 RabbitMQ.start_slave(get_shadow.private_ip)
1275 end
1276 end
12621277
1263 def start_soap_server1278 def start_soap_server
1264 db_master_ip = nil1279 db_master_ip = nil
12651280
=== modified file 'AppController/lib/djinn_job_data.rb'
--- AppController/lib/djinn_job_data.rb 2012-02-01 19:17:26 +0000
+++ AppController/lib/djinn_job_data.rb 2012-02-16 06:02:21 +0000
@@ -39,8 +39,10 @@
39 appscale_jobs += ["login"]39 appscale_jobs += ["login"]
40 appscale_jobs += ["memcache"]40 appscale_jobs += ["memcache"]
41 appscale_jobs += ["open"]41 appscale_jobs += ["open"]
42 appscale_jobs += ["rabbitmq_slave"]
42 appscale_jobs += ["babel_master", "babel_slave"]43 appscale_jobs += ["babel_master", "babel_slave"]
43 appscale_jobs += ["appengine"] # appengine must go last44 appscale_jobs += ["appengine"] # appengine must go last
45
44 46
45 appscale_jobs.each { |job|47 appscale_jobs.each { |job|
46 @jobs << job if roles.include?(job)48 @jobs << job if roles.include?(job)
@@ -67,7 +69,7 @@
67 def serialize69 def serialize
68 keyname = @ssh_key.split("/")[-1]70 keyname = @ssh_key.split("/")[-1]
69 serialized = "#{@public_ip}:#{@private_ip}:#{@jobs.join(':')}:#{@instance_id}:#{@cloud}:#{keyname}"71 serialized = "#{@public_ip}:#{@private_ip}:#{@jobs.join(':')}:#{@instance_id}:#{@cloud}:#{keyname}"
70 Djinn.log_debug("serialized myself to #{serialized}")72 Djinn.log_debug("Serialized current node to #{serialized}")
71 return serialized73 return serialized
72 end74 end
7375
@@ -76,7 +78,7 @@
76 split_data = serialized.split(":")78 split_data = serialized.split(":")
77 roles = split_data[0..-2].join(":")79 roles = split_data[0..-2].join(":")
78 keyname = split_data[-1].split(".")[0]80 keyname = split_data[-1].split(".")[0]
79 Djinn.log_debug("i'm pretty sure roles is [#{roles}] and keyname is [#{keyname}]")81 Djinn.log_debug("Current roles are [#{roles}] and keyname is [#{keyname}]")
80 return DjinnJobData.new(roles, keyname)82 return DjinnJobData.new(roles, keyname)
81 end83 end
8284
@@ -86,6 +88,7 @@
86 else88 else
87 jobs = @jobs.join(', ')89 jobs = @jobs.join(', ')
88 end90 end
91
89 92
90 status = "Node in cloud #{@cloud} with instance id #{@instance_id}" +93 status = "Node in cloud #{@cloud} with instance id #{@instance_id}" +
91 " responds to ssh key #{@ssh_key}, has pub IP #{@public_ip}," +94 " responds to ssh key #{@ssh_key}, has pub IP #{@public_ip}," +
9295
=== modified file 'AppController/lib/haproxy.rb'
--- AppController/lib/haproxy.rb 2011-05-16 07:56:58 +0000
+++ AppController/lib/haproxy.rb 2012-02-16 06:02:21 +0000
@@ -209,15 +209,15 @@
209 # any Mongrel, not just the one that started the session209 # any Mongrel, not just the one that started the session
210 option redispatch210 option redispatch
211211
212 # Timeout a request if the client did not read any data for 120 seconds212 # Timeout a request if the client did not read any data for 60 seconds
213 timeout client 30000213 timeout client 60000
214214
215 # Timeout a request if Mongrel does not accept a connection for 30 seconds215 # Timeout a request if Mongrel does not accept a connection for 60 seconds
216 timeout connect 30000216 timeout connect 60000
217217
218 # Timeout a request if Mongrel does not accept the data on the connection,218 # Timeout a request if Mongrel does not accept the data on the connection,
219 # or does not send a response back in 120 seconds219 # or does not send a response back in 60 seconds
220 timeout server 30000220 timeout server 60000
221 221
222 # Enable the statistics page 222 # Enable the statistics page
223 stats enable223 stats enable
224224
=== modified file 'AppController/lib/nginx.rb'
--- AppController/lib/nginx.rb 2011-06-23 01:38:14 +0000
+++ AppController/lib/nginx.rb 2012-02-16 06:02:21 +0000
@@ -89,9 +89,9 @@
89 proxy_redirect off;89 proxy_redirect off;
90 proxy_pass http://gae_#{app_name};90 proxy_pass http://gae_#{app_name};
91 client_max_body_size 2G;91 client_max_body_size 2G;
92 proxy_connect_timeout 30;92 proxy_connect_timeout 60;
93 client_body_timeout 30;93 client_body_timeout 60;
94 proxy_read_timeout 30;94 proxy_read_timeout 60;
95 }95 }
9696
97 location /404.html {97 location /404.html {
@@ -178,9 +178,9 @@
178 proxy_redirect off;178 proxy_redirect off;
179 proxy_pass http://#{PbServer.name};179 proxy_pass http://#{PbServer.name};
180 client_max_body_size 30M;180 client_max_body_size 30M;
181 proxy_connect_timeout 30;181 proxy_connect_timeout 60;
182 client_body_timeout 30;182 client_body_timeout 60;
183 proxy_read_timeout 30;183 proxy_read_timeout 60;
184 }184 }
185185
186}186}
@@ -322,7 +322,7 @@
322 #tcp_nopush on;322 #tcp_nopush on;
323323
324 #keepalive_timeout 0;324 #keepalive_timeout 0;
325 keepalive_timeout 30;325 keepalive_timeout 60;
326 tcp_nodelay on;326 tcp_nodelay on;
327 server_names_hash_bucket_size 128;327 server_names_hash_bucket_size 128;
328328
329329
=== modified file 'AppController/lib/pbserver.rb'
--- AppController/lib/pbserver.rb 2011-05-19 03:07:09 +0000
+++ AppController/lib/pbserver.rb 2012-02-16 06:02:21 +0000
@@ -17,7 +17,7 @@
17 LISTEN_PORT = 888817 LISTEN_PORT = 8888
18 LISTEN_SSL_PORT = 844318 LISTEN_SSL_PORT = 8443
19 DBS_NEEDING_ONE_PBSERVER = ["mysql"]19 DBS_NEEDING_ONE_PBSERVER = ["mysql"]
20 DBS_WITH_NATIVE_TRANSACTIONS = ["mysql"]20 DBS_WITH_NATIVE_PBSERVER = ["mysql"]
2121
22 def self.start(master_ip, db_local_ip, my_ip, table, zklocations)22 def self.start(master_ip, db_local_ip, my_ip, table, zklocations)
23 pbserver = self.pb_script(table)23 pbserver = self.pb_script(table)
@@ -87,8 +87,8 @@
87 end 87 end
8888
89 def self.pb_script(table)89 def self.pb_script(table)
90 if DBS_WITH_NATIVE_TRANSACTIONS.include?(table)90 if DBS_WITH_NATIVE_PBSERVER.include?(table)
91 return "#{APPSCALE_HOME}/AppDB/appscale_server_native_trans.py"91 return "#{APPSCALE_HOME}/AppDB/appscale_server_#{table}.py"
92 else92 else
93 return "#{APPSCALE_HOME}/AppDB/appscale_server.py"93 return "#{APPSCALE_HOME}/AppDB/appscale_server.py"
94 end94 end
9595
=== added file 'AppController/lib/rabbitmq.rb'
--- AppController/lib/rabbitmq.rb 1970-01-01 00:00:00 +0000
+++ AppController/lib/rabbitmq.rb 2012-02-16 06:02:21 +0000
@@ -0,0 +1,58 @@
1#!/usr/bin/ruby -w
2
3$:.unshift File.join(File.dirname(__FILE__))
4require 'djinn_job_data'
5require 'helperfunctions'
6RABBITMQ_SERVER_PORT = 5672
7# A class to wrap all the interactions with the ejabberd xmpp server
8class RabbitMQ
9 RABBIT_PATH = File.join("/", "etc", "rabbitmq-server")
10
11 def self.start_master()
12 Djinn.log_debug("Starting Rabbit Master")
13 set_cookie()
14 clean_start
15 start_cmd = "rabbitmq-server -detached -setcookie #{HelperFunctions.get_secret()} && rabbitmqctl reset;"
16 stop_cmd = "rabbitmqctl stop"
17
18 Djinn.log_debug(`#{start_cmd}`)
19 end
20
21 # Master IP is who to join
22 def self.start_slave(master_ip)
23 Djinn.log_debug("Starting Rabbit Slave")
24 set_cookie()
25 clean_start
26 #wait_till_port on master node
27 HelperFunctions.sleep_until_port_is_open("appscale-image0", RABBITMQ_SERVER_PORT)
28 # start the server, reset it to join the head node
29
30 start_cmd = ["rabbitmq-server -detached -setcookie #{HelperFunctions.get_secret()}",
31 "rabbitmqctl start_app",
32 "rabbitmqctl stop_app",
33 "rabbitmqctl reset", # this resets the node
34 "rabbitmqctl cluster rabbit@appscale-image0",
35 "rabbitmqctl start_app"]
36 start_cmd = "#{start_cmd.join('; ')}"
37 stop_cmd = "rabbitmqctl stop"
38
39 Djinn.log_debug(`#{start_cmd}`)
40 HelperFunctions.sleep_until_port_is_open("localhost", RABBITMQ_SERVER_PORT)
41 end
42
43
44 def self.stop
45 Djinn.log_debug("Shutting down rabbitmq")
46 end
47
48 def self.set_cookie()
49 cookie_file = "/var/lib/rabbitmq/.erlang.cookie"
50 File.open(cookie_file, 'w') {|f| f.write(HelperFunctions.get_secret()) }
51 end
52
53 def self.clean_start
54 Djinn.log_debug(`rm -rf /var/log/rabbitmq/*`)
55 Djinn.log_debug(`rm -rf /var/lib/rabbitmq/mnesia/*`)
56 end
57
58end
059
=== modified file 'AppController/terminate.rb'
--- AppController/terminate.rb 2011-05-26 20:35:46 +0000
+++ AppController/terminate.rb 2012-02-16 06:02:21 +0000
@@ -135,6 +135,7 @@
135 "beam", "epmd",135 "beam", "epmd",
136 # Voldemort136 # Voldemort
137 "VoldemortServer",137 "VoldemortServer",
138 "rabbitmq",
138# these are too general to kill139# these are too general to kill
139# "java", "python", "python2.6", "python2.5",140# "java", "python", "python2.6", "python2.5",
140 "thin", "god", "djinn", "xmpp_receiver"141 "thin", "god", "djinn", "xmpp_receiver"
141142
=== modified file 'AppDB/appscale_server.py'
--- AppDB/appscale_server.py 2011-09-06 00:41:46 +0000
+++ AppDB/appscale_server.py 2012-02-16 06:02:21 +0000
@@ -150,8 +150,13 @@
150 self.timeTaken = 0150 self.timeTaken = 0
151 def run(self):151 def run(self):
152 s = time.time()152 s = time.time()
153 self.err, self.ret = self.db.put_entity(self.table, self.key,153 ret = self.db.put_entity(self.table, self.key,
154 self.fields, self.values)154 self.fields, self.values)
155 if len(ret) > 1:
156 self.err, self.ret = ret
157 else:
158 self.err = ret[0]
159
155 self.timeTaken = time.time() - s160 self.timeTaken = time.time() - s
156161
157162
@@ -431,7 +436,7 @@
431 print "errcode:",errcode436 print "errcode:",errcode
432 print "errdetail:",errdetail437 print "errdetail:",errdetail
433 self.write( apiresponse.Encode() )438 self.write( apiresponse.Encode() )
434439 del apiresponse
435440
436 def _getGlobalStat(self):441 def _getGlobalStat(self):
437 global_stat_entity=datastore.Entity("__Stat_Total__", id=1)442 global_stat_entity=datastore.Entity("__Stat_Total__", id=1)
@@ -764,6 +769,7 @@
764 clone_qr_pb.clear_cursor()769 clone_qr_pb.clear_cursor()
765 clone_qr_pb.set_more_results( len(results)>0 )770 clone_qr_pb.set_more_results( len(results)>0 )
766 #logger.debug("QUERY_RESULT: %s" % clone_qr_pb)771 #logger.debug("QUERY_RESULT: %s" % clone_qr_pb)
772 del results
767 return (clone_qr_pb.Encode(), 0, "")773 return (clone_qr_pb.Encode(), 0, "")
768774
769775
@@ -1205,7 +1211,6 @@
1205 putresp_pb.key_list().append(e.key())1211 putresp_pb.key_list().append(e.key())
12061212
1207 if PROFILE: appscale_log.write("TOTAL %d %f\n"%(txn.handle(), time.time() - start))1213 if PROFILE: appscale_log.write("TOTAL %d %f\n"%(txn.handle(), time.time() - start))
1208
1209 return (putresp_pb.Encode(), 0, "")1214 return (putresp_pb.Encode(), 0, "")
12101215
12111216
12121217
=== renamed file 'AppDB/appscale_server_native_trans.py' => 'AppDB/appscale_server_mysql.py'
--- AppDB/appscale_server_native_trans.py 2011-09-09 23:51:51 +0000
+++ AppDB/appscale_server_mysql.py 2012-02-16 06:02:21 +0000
@@ -58,6 +58,7 @@
58SECRET_LOCATION = "/etc/appscale/secret.key"58SECRET_LOCATION = "/etc/appscale/secret.key"
59VALID_DATASTORES = []59VALID_DATASTORES = []
60ERROR_CODES = []60ERROR_CODES = []
61ID_KEY_LENGTH = 64
61app_datastore = []62app_datastore = []
62logOn = False63logOn = False
63logFilePtr = ""64logFilePtr = ""
@@ -442,6 +443,9 @@
442 indexes = datastore_pb.CompositeIndices(index_proto)443 indexes = datastore_pb.CompositeIndices(index_proto)
443 for index in indexes.index_list():444 for index in indexes.index_list():
444 index_map.setdefault(index.definition().entity_type(), []).append(index)445 index_map.setdefault(index.definition().entity_type(), []).append(index)
446 # TODO(nchohan)
447 # Looks like we are not storing index info in self.__indexes
448 # When GetIndicies are called it should return the indexes for said app
445 self.__connection_lock.release() 449 self.__connection_lock.release()
446 def Clear(self):450 def Clear(self):
447 pass451 pass
@@ -937,14 +941,59 @@
937 cursor.execute("SELECT RELEASE_LOCK('%s');" % lock_str)941 cursor.execute("SELECT RELEASE_LOCK('%s');" % lock_str)
938 self.__connection.commit()942 self.__connection.commit()
939943
944 def __getRootKey(app_id, ancestor_list):
945 key = app_id # mysql cannot have \ as the first char in the row key
946 a = ancestor_list[0]
947 key += "/"
948
949 # append _ if the name is a number, prevents collisions of key names
950 if a.has_type():
951 key += a.type()
952 else:
953 return None
954
955 if a.has_id():
956 zero_padded_id = ("0" * (ID_KEY_LENGTH - len(str(a.id())))) + str(a.id())
957 key += ":" + zero_padded_id
958 elif a.has_name():
959 if a.name().isdigit():
960 key += ":__key__" + a.name()
961 else:
962 key += ":" + a.name()
963 else:
964 return None
965
966 return key
967
940 @staticmethod968 @staticmethod
941 def __ExtractEntityGroupFromKeys(keys):969 def __ExtractEntityGroupFromKeys(app_id, keys):
942 """Extracts entity group."""970 """Extracts entity group."""
943971 path = keys[0].path()
944 types = set([k.path().element_list()[-1].type() for k in keys])972 element_list = path.element_list()
945 assert len(types) == 1973 def __getRootKey(app_id, ancestor_list):
946974 key = app_id # mysql cannot have \ as the first char in the row key
947 return types.pop()975 a = ancestor_list[0]
976 key += "/"
977
978 # append _ if the name is a number, prevents collisions of key names
979 if a.has_type():
980 key += a.type()
981 else:
982 return None
983
984 if a.has_id():
985 zero_padded_id = ("0" * (ID_KEY_LENGTH - len(str(a.id())))) + str(a.id())
986 key += ":" + zero_padded_id
987 elif a.has_name():
988 if a.name().isdigit():
989 key += ":__key__" + a.name()
990 else:
991 key += ":" + a.name()
992 else:
993 return None
994
995 return key
996 return __getRootKey(app_id, element_list)
948997
949 def AssertPbIsInitialized(self, pb):998 def AssertPbIsInitialized(self, pb):
950 """Raises an exception if the given PB is not initialized and valid."""999 """Raises an exception if the given PB is not initialized and valid."""
@@ -972,9 +1021,9 @@
972 entities = put_request.entity_list()1021 entities = put_request.entity_list()
973 keys = [e.key() for e in entities]1022 keys = [e.key() for e in entities]
974 if put_request.has_transaction():1023 if put_request.has_transaction():
975 entity_group = self.__ExtractEntityGroupFromKeys(keys)1024 entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
976 txn_id = put_request.transaction().handle()1025 txn_id = put_request.transaction().handle()
977 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)1026 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
978 for entity in entities:1027 for entity in entities:
979 self.__ValidateKey(entity.key())1028 self.__ValidateKey(entity.key())
9801029
@@ -1004,6 +1053,8 @@
10041053
1005 self.__PutEntities(conn, entities)1054 self.__PutEntities(conn, entities)
1006 put_response.key_list().extend([e.key() for e in entities])1055 put_response.key_list().extend([e.key() for e in entities])
1056 except Exception, e:
1057 print str(e)
1007 finally:1058 finally:
1008 if not put_request.has_transaction():1059 if not put_request.has_transaction():
1009 self.__ReleaseConnection(conn)1060 self.__ReleaseConnection(conn)
@@ -1013,7 +1064,7 @@
1013 try:1064 try:
1014 keys = get_request.key_list()1065 keys = get_request.key_list()
1015 if get_request.has_transaction():1066 if get_request.has_transaction():
1016 entity_group = self.__ExtractEntityGroupFromKeys(keys)1067 entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
1017 txn_id = get_request.transaction().handle()1068 txn_id = get_request.transaction().handle()
1018 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)1069 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
1019 for key in keys:1070 for key in keys:
@@ -1036,7 +1087,7 @@
1036 try:1087 try:
1037 keys = delete_request.key_list()1088 keys = delete_request.key_list()
1038 if delete_request.has_transaction():1089 if delete_request.has_transaction():
1039 entity_group = self.__ExtractEntityGroupFromKeys(keys)1090 entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
1040 txn_id = delete_request.transaction().handle()1091 txn_id = delete_request.transaction().handle()
1041 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)1092 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
1042 self.__DeleteEntities(conn, delete_request.key_list())1093 self.__DeleteEntities(conn, delete_request.key_list())
@@ -1525,11 +1576,18 @@
1525 'New index id must be 0.')1576 'New index id must be 0.')
15261577
1527 self.__index_lock.acquire()1578 self.__index_lock.acquire()
1579
1580 # If it already exists, just return the index id
1581 if self.__FindIndex(index):
1582 self.__index_lock.release()
1583 id_response.set_value(self.__FindIndex(index))
1584 return
1585
1528 try:1586 try:
1529 if self.__FindIndex(index):1587 #raise apiproxy_errors.ApplicationError(
1530 raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,1588 # datastore_pb.Error.PERMISSION_DENIED,
1531 'Index already exists.')1589 # 'Index already exists.')
15321590
1533 next_id = max([idx.id() for x in self.__indexes.get(app_id, {}).values()1591 next_id = max([idx.id() for x in self.__indexes.get(app_id, {}).values()
1534 for idx in x] + [0]) + 11592 for idx in x] + [0]) + 1
1535 index.set_id(next_id)1593 index.set_id(next_id)
@@ -1544,6 +1602,8 @@
1544 self.__WriteIndexData(conn, app_id)1602 self.__WriteIndexData(conn, app_id)
1545 finally:1603 finally:
1546 self.__ReleaseConnection(conn)1604 self.__ReleaseConnection(conn)
1605 except Exception, e:
1606 print str(e)
1547 finally:1607 finally:
1548 self.__index_lock.release()1608 self.__index_lock.release()
15491609
@@ -1631,7 +1691,6 @@
1631 apirequest.clear_request()1691 apirequest.clear_request()
1632 method = apirequest.method()1692 method = apirequest.method()
1633 http_request_data = apirequest.request()1693 http_request_data = apirequest.request()
1634
1635 if method == "Put":1694 if method == "Put":
1636 response, errcode, errdetail = self.put_request(app_id, 1695 response, errcode, errdetail = self.put_request(app_id,
1637 http_request_data)1696 http_request_data)
@@ -1687,14 +1746,16 @@
1687 self.write(apiresponse.Encode() ) 1746 self.write(apiresponse.Encode() )
16881747
1689 def create_index(self, app_id, http_request_data):1748 def create_index(self, app_id, http_request_data):
1690 index = entity_pb.Index(http_request_data)1749 index = entity_pb.CompositeIndex(http_request_data)
1691 integer = api_base_pb.Integer64Proto()1750 integer = api_base_pb.Integer64Proto()
1692 try:1751 try:
1693 app_datastore._Dynamic_CreateIndex(index, integer)1752 app_datastore._Dynamic_CreateIndex(index, integer)
1694 except Exception, e:1753 except Exception, e:
1754 print str(e)
1695 return (api_base_pb.VoidProto().Encode(),1755 return (api_base_pb.VoidProto().Encode(),
1696 datastore_pb.Error.INTERNAL_ERROR, 1756 datastore_pb.Error.INTERNAL_ERROR,
1697 str(e))1757 str(e))
1758 return (integer.Encode(), 0, "")
16981759
1699 def get_indices(self, app_id, http_request_data):1760 def get_indices(self, app_id, http_request_data):
1700 composite_indices = datastore_pb.CompositeIndices()1761 composite_indices = datastore_pb.CompositeIndices()
@@ -1707,8 +1768,8 @@
1707 return (composite_indices.Encode(), 0, "")1768 return (composite_indices.Encode(), 0, "")
17081769
1709 def update_index(self, app_id, http_request_data):1770 def update_index(self, app_id, http_request_data):
1710 index = entity_pb.Index(http_request_data)1771 index = entity_pb.CompositeIndex(http_request_data)
1711 void_resp = api_base_pb.VoidProto().Encode()1772 void_resp = api_base_pb.VoidProto()
1712 try:1773 try:
1713 app_datastore._Dynamic_UpdateIndex(index, void_resp) 1774 app_datastore._Dynamic_UpdateIndex(index, void_resp)
1714 except Exception, e:1775 except Exception, e:
@@ -1776,6 +1837,7 @@
1776 txn_id = transaction_pb.handle() 1837 txn_id = transaction_pb.handle()
1777 commitres_pb = datastore_pb.CommitResponse()1838 commitres_pb = datastore_pb.CommitResponse()
1778 try:1839 try:
1840 zoo_keeper.releaseLock(app_id, txn_id)
1779 app_datastore._Dynamic_Commit(app_id, transaction_pb, commitres_pb)1841 app_datastore._Dynamic_Commit(app_id, transaction_pb, commitres_pb)
1780 except:1842 except:
1781 return (commitres_pb.Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to commit for this transaction") 1843 return (commitres_pb.Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to commit for this transaction")
@@ -1785,9 +1847,12 @@
1785 transaction_pb = datastore_pb.Transaction(http_request_data)1847 transaction_pb = datastore_pb.Transaction(http_request_data)
1786 handle = transaction_pb.handle() 1848 handle = transaction_pb.handle()
1787 try:1849 try:
1850 zoo_keeper.releaseLock(app_id, handle)
1788 app_datastore._Dynamic_Rollback(app_id, transaction_pb, None)1851 app_datastore._Dynamic_Rollback(app_id, transaction_pb, None)
1789 except:1852 except Exception, e:
1853 print str(e)
1790 return(api_base_pb.VoidProto().Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to rollback for this transaction")1854 return(api_base_pb.VoidProto().Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to rollback for this transaction")
1855 print "Transaction with handle %d was roll backed"%handle
1791 return (api_base_pb.VoidProto().Encode(), 0, "")1856 return (api_base_pb.VoidProto().Encode(), 0, "")
17921857
17931858
17941859
=== modified file 'AppDB/cassandra/cassandra_helper.rb'
--- AppDB/cassandra/cassandra_helper.rb 2011-05-30 01:04:15 +0000
+++ AppDB/cassandra/cassandra_helper.rb 2012-02-16 06:02:21 +0000
@@ -15,11 +15,29 @@
15 return false15 return false
16end16end
1717
18def get_local_token(master_ip, slave_ips)
19 # Calculate everyone's token for data partitioning
20 if master_ip == HelperFunctions.local_ip
21 return 0
22 end
23
24 for ii in 0..slave_ips.length
25 # Based on local ip return the correct token
26 # This token generation was taken from:
27 # http://www.datastax.com/docs/0.8/install/cluster_init#cluster-init
28 if slave_ips[ii] == HelperFunctions.local_ip
29 # Add one to offset the master
30 return (ii + 1)*(2**127)/(1 + slave_ips.length)
31 end
32 end
33end
34
18def setup_db_config_files(master_ip, slave_ips, creds)35def setup_db_config_files(master_ip, slave_ips, creds)
19 source_dir = "#{APPSCALE_HOME}/AppDB/cassandra/templates"36 source_dir = "#{APPSCALE_HOME}/AppDB/cassandra/templates"
20 dest_dir = "#{APPSCALE_HOME}/AppDB/cassandra/cassandra/conf"37 dest_dir = "#{APPSCALE_HOME}/AppDB/cassandra/cassandra/conf"
2138
22 all_ips = [master_ip, slave_ips].flatten39 all_ips = [master_ip, slave_ips].flatten
40 local_token = get_local_token(master_ip, slave_ips)
2341
24 files_to_config = `ls #{source_dir}`.split42 files_to_config = `ls #{source_dir}`.split
25 files_to_config.each{ |filename|43 files_to_config.each{ |filename|
@@ -29,6 +47,7 @@
29 contents = source_file.read47 contents = source_file.read
30 contents.gsub!(/APPSCALE-LOCAL/, HelperFunctions.local_ip)48 contents.gsub!(/APPSCALE-LOCAL/, HelperFunctions.local_ip)
31 contents.gsub!(/APPSCALE-MASTER/, master_ip)49 contents.gsub!(/APPSCALE-MASTER/, master_ip)
50 contents.gsub!(/APPSCALE-TOKEN/, "#{local_token}")
32 contents.gsub!(/REPLICATION/, creds["replication"])51 contents.gsub!(/REPLICATION/, creds["replication"])
33 contents.gsub!(/APPSCALE-JMX-PORT/, "7070") 52 contents.gsub!(/APPSCALE-JMX-PORT/, "7070")
34 File.open(full_path_to_write, "w+") { |dest_file|53 File.open(full_path_to_write, "w+") { |dest_file|
@@ -36,16 +55,16 @@
36 }55 }
37 }56 }
38 }57 }
58
39end59end
40
41def start_db_master()60def start_db_master()
42 @state = "Starting up Cassandra on the head node"61 @state = "Starting up Cassandra on the head node"
43 Djinn.log_debug("Starting up Cassandra as master")62 Djinn.log_debug("Starting up Cassandra as master")
4463
45 Djinn.log_debug(`pkill ThriftBroker`)64 Djinn.log_run("pkill ThriftBroker")
46 `rm -rf /var/appscale/cassandra*`65 `rm -rf /var/appscale/cassandra*`
4766 `rm /var/log/appscale/cassandra/system.log`
48 Djinn.log_debug(`#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`)67 Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid")
49 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, 9160)68 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, 9160)
50end69end
5170
@@ -54,10 +73,12 @@
54 Djinn.log_debug("Starting up Cassandra as slave")73 Djinn.log_debug("Starting up Cassandra as slave")
5574
56 HelperFunctions.sleep_until_port_is_open(Djinn.get_db_master_ip, 9160)75 HelperFunctions.sleep_until_port_is_open(Djinn.get_db_master_ip, 9160)
57 76 sleep(5)
58 `rm -rf /var/appscale/cassandra*`77 `rm -rf /var/appscale/cassandra*`
59 Djinn.log_debug(`#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`)78 `rm /var/log/appscale/cassandra/system.log`
60 HelperFunctions.sleep_until_port_is_open(Djinn.get_db_master_ip, 9160)79 `#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`
80 #Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid")
81 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, 9160)
61end82end
6283
63def stop_db_master84def stop_db_master
@@ -70,3 +91,4 @@
70 Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/nodetool decommission -h #{HelperFunctions.local_ip} -p 6666")91 Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/nodetool decommission -h #{HelperFunctions.local_ip} -p 6666")
71 Djinn.log_run("cat /var/appscale/appscale-cassandra.pid | xargs kill -9")92 Djinn.log_run("cat /var/appscale/appscale-cassandra.pid | xargs kill -9")
72end93end
94
7395
=== modified file 'AppDB/cassandra/prime_cassandra.py'
--- AppDB/cassandra/prime_cassandra.py 2011-05-29 21:41:44 +0000
+++ AppDB/cassandra/prime_cassandra.py 2012-02-16 06:02:21 +0000
@@ -18,19 +18,19 @@
18 except pycassa.cassandra.ttypes.InvalidRequestException, e:18 except pycassa.cassandra.ttypes.InvalidRequestException, e:
19 pass19 pass
2020
21 sys.create_keyspace('Keyspace1', replication)21 sys.create_keyspace('Keyspace1', pycassa.SIMPLE_STRATEGY, {'replication_factor':str(replication)})
22 sys.create_column_family('Keyspace1', 'Standard1', 22 sys.create_column_family('Keyspace1', 'Standard1', #column_type="Standard",
23 comparator_type=UTF8_TYPE)23 comparator_type=UTF8_TYPE)
24 sys.create_column_family('Keyspace1', 'Standard2',24 sys.create_column_family('Keyspace1', 'Standard2', #column_type="Standard",
25 comparator_type=UTF8_TYPE)25 comparator_type=UTF8_TYPE)
26 sys.create_column_family('Keyspace1', 'StandardByTime1',26 sys.create_column_family('Keyspace1', 'StandardByTime1', #column_type="Standard",
27 comparator_type=TIME_UUID_TYPE)27 comparator_type=TIME_UUID_TYPE)
28 sys.create_column_family('Keyspace1', 'StandardByTime2',28 sys.create_column_family('Keyspace1', 'StandardByTime2', #column_type="Standard",
29 comparator_type=TIME_UUID_TYPE)29 comparator_type=TIME_UUID_TYPE)
30 sys.create_column_family('Keyspace1', 'Super1', super=True,30 #sys.create_column_family('Keyspace1', 'Super1', column_type="Super",
31 comparator_type=UTF8_TYPE)31 # comparator_type=UTF8_TYPE)
32 sys.create_column_family('Keyspace1', 'Super2', super=True,32 #sys.create_column_family('Keyspace1', 'Super2', column_type="Super",
33 comparator_type=UTF8_TYPE)33 # comparator_type=UTF8_TYPE)
34 sys.close()34 sys.close()
35 print "SUCCESS"35 print "SUCCESS"
3636
@@ -41,7 +41,6 @@
41 #print db.get("__keys_") 41 #print db.get("__keys_")
42 db.create_table(USERS_TABLE, USERS_SCHEMA)42 db.create_table(USERS_TABLE, USERS_SCHEMA)
43 db.create_table(APPS_TABLE, APPS_SCHEMA)43 db.create_table(APPS_TABLE, APPS_SCHEMA)
44
45 if len(db.get_schema(USERS_TABLE)) > 1 and len(db.get_schema(APPS_TABLE)) > 1:44 if len(db.get_schema(USERS_TABLE)) > 1 and len(db.get_schema(APPS_TABLE)) > 1:
46 print "CREATE TABLE SUCCESS FOR USER AND APPS"45 print "CREATE TABLE SUCCESS FOR USER AND APPS"
47 print db.get_schema(USERS_TABLE)46 print db.get_schema(USERS_TABLE)
4847
=== modified file 'AppDB/cassandra/py_cassandra.py'
--- AppDB/cassandra/py_cassandra.py 2011-05-31 00:01:41 +0000
+++ AppDB/cassandra/py_cassandra.py 2012-02-16 06:02:21 +0000
@@ -1,36 +1,31 @@
1#1#
2# Cassandra Interface for AppScale2# Cassandra Interface for AppScale
3# Rewritten by Navraj Chohan for using range queries3# Rewritten by Navraj Chohan for pycassa
4# Modified by Chris Bunch for upgrade to Cassandra 0.50.04# Modified by Chris Bunch for upgrade to Cassandra 0.50.0 # on 2/17/10
5# on 2/17/10
6# Original author: suwanny@gmail.com5# Original author: suwanny@gmail.com
76
8import os,sys7import os,sys
9import time8import time
10
11from thrift_cass.Cassandra import Client
12from thrift_cass.ttypes import *
13
14import string9import string
15import base64 # base64 2009.04.1610import base64
16from dbconstants import *11from dbconstants import *
17from dbinterface import *12from dbinterface import *
18#import sqlalchemy.pool as pool
19import appscale_logger13import appscale_logger
20import pycassa14import pycassa
15from pycassa.system_manager import *
21from thrift import Thrift16from thrift import Thrift
22from thrift.transport import TSocket17from thrift.transport import TSocket
23from thrift.transport import TTransport18from thrift.transport import TTransport
24from thrift.protocol import TBinaryProtocol19from thrift.protocol import TBinaryProtocol
20from pycassa.cassandra.ttypes import NotFoundException
21
25ERROR_DEFAULT = "DB_ERROR:" # ERROR_CASSANDRA22ERROR_DEFAULT = "DB_ERROR:" # ERROR_CASSANDRA
26# Store all schema information in a special table23# Store all schema information in a special table
27# If a table does not show up in this table, try a range query 24# If a table does not show up in this table, try a range query
28# to discover it's schema25# to discover it's schema
29SCHEMA_TABLE = "__key__"26SCHEMA_TABLE = "__key__"
30SCHEMA_TABLE_SCHEMA = ['schema']27SCHEMA_TABLE_SCHEMA = ['schema']
31# use 1 Table and 1 ColumnFamily in Cassandra
32MAIN_TABLE = "Keyspace1"28MAIN_TABLE = "Keyspace1"
33COLUMN_FAMILY = "Standard1"
3429
35PERSISTENT_CONNECTION = False30PERSISTENT_CONNECTION = False
36PROFILING = False31PROFILING = False
@@ -50,9 +45,20 @@
50 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')45 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
51 self.host = f.read()46 self.host = f.read()
52 self.port = DEFAULT_PORT47 self.port = DEFAULT_PORT
53 #self.pool = pool.QueuePool(self.__create_connection, reset_on_return=False)48 self.pool = pycassa.ConnectionPool(keyspace='Keyspace1',
54 #connection.add_pool('AppScale', [self.host, self.port])49 server_list=[self.host+":"+str(self.port)],
55 self.pool = pycassa.ConnectionPool(keyspace='Keyspace1', server_list=[self.host+":"+str(self.port)], prefill=False)50 prefill=False)
51 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
52 host = f.read()
53 sys = SystemManager(host + ":" + str(DEFAULT_PORT))
54 try:
55 sys.create_column_family('Keyspace1',
56 SCHEMA_TABLE,
57 comparator_type=UTF8_TYPE)
58 except Exception, e:
59 print "Exception creating column family: %s"%str(e)
60 pass
61
56 self.logger = logger62 self.logger = logger
5763
58 def logTiming(self, function, start_time, end_time):64 def logTiming(self, function, start_time, end_time):
@@ -62,30 +68,21 @@
62 def get_entity(self, table_name, row_key, column_names):68 def get_entity(self, table_name, row_key, column_names):
63 error = [ERROR_DEFAULT]69 error = [ERROR_DEFAULT]
64 list = error70 list = error
65 client = None
66 row_key = table_name + '/' + row_key71 row_key = table_name + '/' + row_key
67 try:72 try:
68 slice_predicate = SlicePredicate(column_names=column_names)73 cf = pycassa.ColumnFamily(self.pool,
69 path = ColumnPath(COLUMN_FAMILY)74 string.replace(table_name, '-','a'))
70 client = self.__setup_connection()75 result = cf.get(row_key, columns=column_names)
71 # Result is a column type which has name, value, timestamp76 # Order entities by column_names
72 result = client.get_slice(row_key, path, slice_predicate,
73 CONSISTENCY_QUORUM)
74 for column in column_names:77 for column in column_names:
75 for r in result:78 list.append(result[column])
76 c = r.column79 except NotFoundException:
77 if column == c.name:
78 list.append(c.value)
79 except NotFoundException: # occurs normally if the item isn't in the db
80 list[0] += "Not found"80 list[0] += "Not found"
81 self.__close_connection(client)
82 return list81 return list
83 except Exception, ex:82 except Exception, ex:
84 #self.logger.debug("Exception %s" % ex)
85 list[0]+=("Exception: %s"%ex)83 list[0]+=("Exception: %s"%ex)
86 self.__close_connection(client)
87 return list84 return list
88 self.__close_connection(client)85
89 if len(list) == 1:86 if len(list) == 1:
90 list[0] += "Not found"87 list[0] += "Not found"
91 return list88 return list
@@ -93,34 +90,25 @@
93 def put_entity(self, table_name, row_key, column_names, cell_values):90 def put_entity(self, table_name, row_key, column_names, cell_values):
94 error = [ERROR_DEFAULT]91 error = [ERROR_DEFAULT]
95 list = error92 list = error
96 client = None
9793
98 # The first time a table is seen94 # The first time a table is seen
99 if table_name not in table_cache:95 if table_name not in table_cache:
100 self.create_table(table_name, column_names)96 self.create_table(table_name, column_names)
10197
102 row_key = table_name + '/' + row_key98 row_key = table_name + '/' + row_key
103 client = self.__setup_connection()99 cell_dict = {}
104 curtime = self.timestamp()
105 # Result is a column type which has name, value, timestamp
106 mutations = []
107 for index, ii in enumerate(column_names):100 for index, ii in enumerate(column_names):
108 column = Column(name = ii, value=cell_values[index],101 cell_dict[ii] = cell_values[index]
109 timestamp=curtime)102
110 c_or_sc = ColumnOrSuperColumn(column=column)103 try:
111 mutation = Mutation(column_or_supercolumn=c_or_sc)104 # cannot have "-" in the column name
112 mutations.append(mutation)105 cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
113 mutation_map = {row_key : { COLUMN_FAMILY : mutations } }106 except NotFoundException:
114 client.batch_mutate(mutation_map, CONSISTENCY_QUORUM) 107 print "Unable to find column family"
115 """except Exception, ex:108 list[0]+=("Exception: Column family not found")
116 print "EXCEPTION"
117 self.logger.debug("Exception %s" % ex)
118 list[0]+=("Exception: %s"%ex)
119 self.__close_connection(client)
120 list.append("0")
121 return list109 return list
122 """110
123 self.__close_connection(client)111 cf.insert(row_key, cell_dict)
124 list.append("0")112 list.append("0")
125 return list113 return list
126114
@@ -130,18 +118,19 @@
130118
131 def get_table(self, table_name, column_names):119 def get_table(self, table_name, column_names):
132 error = [ERROR_DEFAULT] 120 error = [ERROR_DEFAULT]
133 client = None
134 result = error121 result = error
135 keyslices = []122 keyslices = []
136 start_key = table_name + "/"123 start_key = table_name + "/"
137 end_key = table_name + '/~'124 end_key = table_name + '/~'
138 try: 125 try:
139 cf = pycassa.ColumnFamily(self.pool, 'Standard1')126 cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
140 keyslices = cf.get_range(columns=column_names, 127 keyslices = cf.get_range(columns=column_names,
141 start=start_key, 128 start=start_key,
142 finish=end_key, 129 finish=end_key)
143 read_consistency_level=CONSISTENCY_QUORUM)
144 keyslices = list(keyslices)130 keyslices = list(keyslices)
131 except pycassa.NotFoundException, ex:
132 self.logger.debug("No column fam yet--exception %s" % ex)
133 return result
145 except Exception, ex:134 except Exception, ex:
146 self.logger.debug("Exception %s" % ex)135 self.logger.debug("Exception %s" % ex)
147 result[0] += "Exception: " + str(ex)136 result[0] += "Exception: " + str(ex)
@@ -166,21 +155,15 @@
166 def delete_row(self, table_name, row_key):155 def delete_row(self, table_name, row_key):
167 error = [ERROR_DEFAULT]156 error = [ERROR_DEFAULT]
168 ret = error157 ret = error
169 client = None
170 row_key = table_name + '/' + row_key158 row_key = table_name + '/' + row_key
171 path = ColumnPath(COLUMN_FAMILY)
172 try: 159 try:
173 client = self.__setup_connection()160 cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
174 curtime = self.timestamp()
175 # Result is a column type which has name, value, timestamp161 # Result is a column type which has name, value, timestamp
176 client.remove(row_key, path, curtime,162 cf.remove(row_key)
177 CONSISTENCY_QUORUM)
178 except Exception, ex:163 except Exception, ex:
179 self.logger.debug("Exception %s" % ex)164 self.logger.debug("Exception %s" % ex)
180 ret[0]+=("Exception: %s"%ex)165 ret[0]+=("Exception: %s"%ex)
181 self.__close_connection(client)
182 return ret 166 return ret
183 self.__close_connection(client)
184 ret.append("0")167 ret.append("0")
185 return ret168 return ret
186169
@@ -203,51 +186,47 @@
203 def delete_table(self, table_name):186 def delete_table(self, table_name):
204 error = [ERROR_DEFAULT] 187 error = [ERROR_DEFAULT]
205 result = error188 result = error
206 client = None
207 keyslices = []189 keyslices = []
208 column_parent = ColumnParent(column_family="Standard1")
209 predicate = SlicePredicate(column_names=[])
210 curtime = self.timestamp()190 curtime = self.timestamp()
211 path = ColumnPath(COLUMN_FAMILY)
212 start_key = table_name + "/"191 start_key = table_name + "/"
213 end_key = table_name + '/~'192 end_key = table_name + '/~'
214 try: 193 try:
215 cf = pycassa.ColumnFamily(self.pool, 'Standard1')194 cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
216 keyslices = cf.get_range(columns=[], 195 cf.truncate()
217 start=start_key, 196 self.delete_row(SCHEMA_TABLE, row_key)
218 finish=end_key,
219 read_consistency_level=CONSISTENCY_QUORUM)
220 except Exception, ex:197 except Exception, ex:
221 self.logger.debug("Exception %s" % ex)198 self.logger.debug("Exception %s" % ex)
222 result[0]+=("Exception: %s"%ex)199 result[0]+=("Exception: %s"%ex)
223 return result200 return result
224 keys_removed = False201 if table_name not in table_cache:
225 for keyslice in keyslices:
226 row_key = keyslice[0]
227 client = self.__setup_connection()
228 client.remove(row_key,
229 path,
230 curtime,
231 CONSISTENCY_QUORUM)
232 keys_removed = True
233 if table_name not in table_cache and keys_removed:
234 result[0] += "Table does not exist"202 result[0] += "Table does not exist"
235 return result203 return result
236 if table_name in table_cache:204 if table_name in table_cache:
237 del table_cache[table_name]205 del table_cache[table_name]
238 if client:
239 self.__close_connection(client)
240 return result206 return result
241207
242 # Only stores the schema208 # Only stores the schema
243 def create_table(self, table_name, column_names):209 def create_table(self, table_name, column_names):
244 table_cache[table_name] = 1
245 columns = ':'.join(column_names)210 columns = ':'.join(column_names)
246 row_key = table_name211 row_key = table_name
212 print "CREATE TABLE NAME: " + table_name
247 # Get and make sure we are not overwriting previous schemas213 # Get and make sure we are not overwriting previous schemas
248 ret = self.get_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA)214 ret = self.get_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA)
215 print ret
249 if ret[0] != ERROR_DEFAULT:216 if ret[0] != ERROR_DEFAULT:
250 self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])217 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
218 host = f.read()
219 sysman = SystemManager(host + ":" + str(DEFAULT_PORT))
220 print "Creating column family %s"%table_name
221 try:
222 sysman.create_column_family('Keyspace1', string.replace(table_name, '-','a'), comparator_type=UTF8_TYPE)
223 print "Done creating column family"
224 self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])
225 except Exception, e:
226 print "Unable to create column family %s"%str(e)
227 return
228
229 table_cache[table_name] = 1
251230
252 ######################################################################231 ######################################################################
253 # private methods 232 # private methods
254233
=== added file 'AppDB/cassandra/templates/brisk'
--- AppDB/cassandra/templates/brisk 1970-01-01 00:00:00 +0000
+++ AppDB/cassandra/templates/brisk 2012-02-16 06:02:21 +0000
@@ -0,0 +1,16 @@
1# NOTICE: See also /etc/brisk/cassandra/cassandra-env.sh
2
3# EXTRA_CLASSPATH provides the means to extend Cassandra's classpath with
4# additional libraries. It is formatted as a colon-delimited list of
5# class directories and/or jar files. For example, to enable the
6# JMX-to-web bridge install libmx4j-java and uncomment the following.
7#EXTRA_CLASSPATH="/usr/share/java/mx4j-tools.jar"
8
9# enable this start also start Hadoop's JobTracker and/or TaskTrackers on this
10# machine. If left disabled, this will act as a regular Cassandra node.
11HADOOP_ENABLED=1
12
13# enable this to set the replication factor for CFS. Note that this will only
14# have an effect the first time a cluster is started with HADOOP_ENABLED=1 and
15# after that will be a no-op. Defaults to 1.
16#CFS_REPLICATION_FACTOR=1
017
=== modified file 'AppDB/cassandra/templates/cassandra.yaml'
--- AppDB/cassandra/templates/cassandra.yaml 2011-05-30 01:04:15 +0000
+++ AppDB/cassandra/templates/cassandra.yaml 2012-02-16 06:02:21 +0000
@@ -19,23 +19,15 @@
19# the heaviest-loaded existing node. If there is no load information19# the heaviest-loaded existing node. If there is no load information
20# available, such as is the case with a new cluster, it will pick20# available, such as is the case with a new cluster, it will pick
21# a random token, which will lead to hot spots.21# a random token, which will lead to hot spots.
22initial_token:22initial_token: APPSCALE-TOKEN
23
24# Set to true to make new [non-seed] nodes automatically migrate data
25# to themselves from the pre-existing nodes in the cluster. Defaults
26# to false because you can only bootstrap N machines at a time from
27# an existing cluster of N, so if you are bringing up a cluster of
28# 10 machines with 3 seeds you would have to do it in stages. Leaving
29# this off for the initial start simplifies that.
30auto_bootstrap: false
3123
32# See http://wiki.apache.org/cassandra/HintedHandoff24# See http://wiki.apache.org/cassandra/HintedHandoff
33hinted_handoff_enabled: true25hinted_handoff_enabled: true
34# this defines the maximum amount of time a dead host will have hints26# this defines the maximum amount of time a dead host will have hints
35# generated. After it has been dead this long, hints will be dropped.27# generated. After it has been dead this long, hints will be dropped.
36max_hint_window_in_ms: 3600000 # one hour28max_hint_window_in_ms: 3600000 # one hour
37# Sleep this long after delivering each row or row fragment29# Sleep this long after delivering each hint
38hinted_handoff_throttle_delay_in_ms: 5030hinted_handoff_throttle_delay_in_ms: 1
3931
40# authentication backend, implementing IAuthenticator; used to identify users32# authentication backend, implementing IAuthenticator; used to identify users
41authenticator: org.apache.cassandra.auth.AllowAllAuthenticator33authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
@@ -64,10 +56,10 @@
64# ordering. Use this as an example if you need custom collation.56# ordering. Use this as an example if you need custom collation.
65#57#
66# See http://wiki.apache.org/cassandra/Operations for more on58# See http://wiki.apache.org/cassandra/Operations for more on
67# partitioners anv toke liblib libselection.59# partitioners and token selection.
68partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner60partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
6961
70# directories where Cassandra should libstore data on disk.62# directories where Cassandra should store data on disk.
71data_file_directories:63data_file_directories:
72 - /var/appscale/cassandra/data64 - /var/appscale/cassandra/data
7365
@@ -77,21 +69,34 @@
77# saved caches69# saved caches
78saved_caches_directory: /var/appscale/cassandra/saved_caches70saved_caches_directory: /var/appscale/cassandra/saved_caches
7971
80# Size to allow commitlog to grow to before creating a new segment
81commitlog_rotation_threshold_in_mb: 128
82
83# commitlog_sync may be either "periodic" or "batch." 72# commitlog_sync may be either "periodic" or "batch."
84# When in batch mode, Cassandra won't ack writes until the commit log73# When in batch mode, Cassandra won't ack writes until the commit log
85# has been fsynced to disk. It will wait up to74# has been fsynced to disk. It will wait up to
86# CommitLogSyncBatchWindowInMS milliseconds for other writes, before75# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
87# performing the sync.76# performing the sync.
88commitlog_sync: periodic77#
8978# commitlog_sync: batch
79# commitlog_sync_batch_window_in_ms: 50
80#
90# the other option is "periodic" where writes may be acked immediately81# the other option is "periodic" where writes may be acked immediately
91# and the CommitLog is simply synced every commitlog_sync_period_in_ms82# and the CommitLog is simply synced every commitlog_sync_period_in_ms
92# milliseconds.83# milliseconds.
84commitlog_sync: periodic
93commitlog_sync_period_in_ms: 1000085commitlog_sync_period_in_ms: 10000
9486
87# any class that implements the SeedProvider interface and has a
88# constructor that takes a Map<String, String> of parameters will do.
89seed_provider:
90 # Addresses of hosts that are deemed contact points.
91 # Cassandra nodes use this list of hosts to find each other and learn
92 # the topology of the ring. You must change this if you are running
93 # multiple nodes!
94 - class_name: org.apache.cassandra.locator.SimpleSeedProvider
95 parameters:
96 # seeds is actually a comma-delimited list of addresses.
97 # Ex: "<ip1>,<ip2>,<ip3>"
98 - seeds: "APPSCALE-MASTER"
99
95# emergency pressure valve: each time heap usage after a full (CMS)100# emergency pressure valve: each time heap usage after a full (CMS)
96# garbage collection is above this fraction of the max, Cassandra will101# garbage collection is above this fraction of the max, Cassandra will
97# flush the largest memtables. 102# flush the largest memtables.
@@ -117,13 +122,6 @@
117reduce_cache_sizes_at: 0.85122reduce_cache_sizes_at: 0.85
118reduce_cache_capacity_to: 0.6123reduce_cache_capacity_to: 0.6
119124
120# Addresses of hosts that are deemed contact points.
121# Cassandra nodes use this list of hosts to find each other and learn
122# the topology of the ring. You must change this if you are running
123# multiple nodes!
124seeds:
125 - APPSCALE-MASTER
126
127# For workloads with more data than can fit in memory, Cassandra's125# For workloads with more data than can fit in memory, Cassandra's
128# bottleneck will be reads that need to fetch data from126# bottleneck will be reads that need to fetch data from
129# disk. "concurrent_reads" should be set to (16 * number_of_drives) in127# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
@@ -136,6 +134,17 @@
136concurrent_reads: 32134concurrent_reads: 32
137concurrent_writes: 32135concurrent_writes: 32
138136
137# Total memory to use for memtables. Cassandra will flush the largest
138# memtable when this much memory is used.
139# If omitted, Cassandra will set it to 1/3 of the heap.
140# memtable_total_space_in_mb: 2048
141
142# Total space to use for commitlogs.
143# If space gets above this value (it will round up to the next nearest
144# segment multiple), Cassandra will flush every dirty CF in the oldest
145# segment and remove it.
146# commitlog_total_space_in_mb: 4096
147
139# This sets the amount of memtable flush writer threads. These will148# This sets the amount of memtable flush writer threads. These will
140# be blocked by disk io, and each one will hold a memtable in memory149# be blocked by disk io, and each one will hold a memtable in memory
141# while blocked. If you have a large heap and many data directories,150# while blocked. If you have a large heap and many data directories,
@@ -155,6 +164,10 @@
155# TCP port, for commands and data164# TCP port, for commands and data
156storage_port: 7000165storage_port: 7000
157166
167# SSL port, for encrypted communication. Unused unless enabled in
168# encryption_options
169ssl_storage_port: 7001
170
158# Address to bind to and tell other Cassandra nodes to connect to. You171# Address to bind to and tell other Cassandra nodes to connect to. You
159# _must_ change this if you want multiple nodes to be able to172# _must_ change this if you want multiple nodes to be able to
160# communicate!173# communicate!
@@ -167,29 +180,53 @@
167# Setting this to 0.0.0.0 is always wrong.180# Setting this to 0.0.0.0 is always wrong.
168listen_address: APPSCALE-LOCAL181listen_address: APPSCALE-LOCAL
169182
183# Address to broadcast to other Cassandra nodes
184# Leaving this blank will set it to the same value as listen_address
185# broadcast_address: 1.2.3.4
186
170# The address to bind the Thrift RPC service to -- clients connect187# The address to bind the Thrift RPC service to -- clients connect
171# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if188# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
172# you want Thrift to listen on all interfaces.189# you want Thrift to listen on all interfaces.
173# 190#
174# Leaving this blank has the same effect it does for ListenAddress,191# Leaving this blank has the same effect it does for ListenAddress,
175# (i.e. it will be based on the configured hostname of the node).192# (i.e. it will be based on the configured hostname of the node).
176rpc_address: APPSCALE-LOCAL193rpc_address: 0.0.0.0
177# port for Thrift to listen for clients on194# port for Thrift to listen for clients on
178rpc_port: 9160195rpc_port: 9160
179196
180# enable or disable keepalive on rpc connections197# enable or disable keepalive on rpc connections
181rpc_keepalive: true198rpc_keepalive: true
182199
183# Cassandra uses thread-per-client for client RPC. This can200# Cassandra provides three options for the RPC Server:
184# be expensive in memory used for thread stack for a large201#
185# enough number of clients. (Hence, connection pooling is202# sync -> One connection per thread in the rpc pool (see below).
186# very, very strongly recommended.)203# For a very large number of clients, memory will be your limiting
187# 204# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
205# Connection pooling is very, very strongly recommended.
206#
207# async -> Nonblocking server implementation with one thread to serve
208# rpc connections. This is not recommended for high throughput use
209# cases. Async has been tested to be about 50% slower than sync
210# or hsha and is deprecated: it will be removed in the next major release.
211#
212# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
213# (see below) is used to manage requests, but the threads are multiplexed
214# across the different clients.
215#
216# The default is sync because on Windows hsha is about 30% slower. On Linux,
217# sync/hsha performance is about the same, with hsha of course using less memory.
218rpc_server_type: sync
219
188# Uncomment rpc_min|max|thread to set request pool size.220# Uncomment rpc_min|max|thread to set request pool size.
189# You would primarily set max as a safeguard against misbehaved221# You would primarily set max for the sync server to safeguard against
190# clients; if you do hit the max, Cassandra will block until222# misbehaved clients; if you do hit the max, Cassandra will block until one
191# one disconnects before accepting more. The defaults are223# disconnects before accepting more. The defaults for sync are min of 16 and max
192# min of 16 and max unlimited.224# unlimited.
225#
226# For the Hsha server, the min and max both default to quadruple the number of
227# CPU cores.
228#
229# This configuration is ignored by the async server.
193#230#
194# rpc_min_threads: 16231# rpc_min_threads: 16
195# rpc_max_threads: 2048232# rpc_max_threads: 2048
@@ -219,10 +256,6 @@
219# is a data format change.256# is a data format change.
220snapshot_before_compaction: false257snapshot_before_compaction: false
221258
222# change this to increase the compaction thread's priority. In java, 1 is the
223# lowest priority and that is our default.
224# compaction_thread_priority: 1
225
226# Add column indexes to a row after its contents reach this size.259# Add column indexes to a row after its contents reach this size.
227# Increase if your column values are large, or if you have a very large260# Increase if your column values are large, or if you have a very large
228# number of columns. The competing causes are, Cassandra has to261# number of columns. The competing causes are, Cassandra has to
@@ -237,11 +270,48 @@
237# will be logged specifying the row key.270# will be logged specifying the row key.
238in_memory_compaction_limit_in_mb: 64271in_memory_compaction_limit_in_mb: 64
239272
273# Number of simultaneous compactions to allow, NOT including
274# validation "compactions" for anti-entropy repair. Simultaneous
275# compactions can help preserve read performance in a mixed read/write
276# workload, by mitigating the tendency of small sstables to accumulate
277# during a single long running compactions. The default is usually
278# fine and if you experience problems with compaction running too
279# slowly or too fast, you should look at
280# compaction_throughput_mb_per_sec first.
281#
282# This setting has no effect on LeveledCompactionStrategy.
283#
284# concurrent_compactors defaults to the number of cores.
285# Uncomment to make compaction mono-threaded, the pre-0.8 default.
286#concurrent_compactors: 1
287
288# Multi-threaded compaction. When enabled, each compaction will use
289# up to one thread per core, plus one thread per sstable being merged.
290# This is usually only useful for SSD-based hardware: otherwise,
291# your concern is usually to get compaction to do LESS i/o (see:
292# compaction_throughput_mb_per_sec), not more.
293multithreaded_compaction: false
294
295# Throttles compaction to the given total throughput across the entire
296# system. The faster you insert data, the faster you need to compact in
297# order to keep the sstable count down, but in general, setting this to
298# 16 to 32 times the rate you are inserting data is more than sufficient.
299# Setting this to 0 disables throttling. Note that this account for all types
300# of compaction, including validation compaction.
301compaction_throughput_mb_per_sec: 16
302
240# Track cached row keys during compaction, and re-cache their new303# Track cached row keys during compaction, and re-cache their new
241# positions in the compacted sstable. Disable if you use really large304# positions in the compacted sstable. Disable if you use really large
242# key caches.305# key caches.
243compaction_preheat_key_cache: true306compaction_preheat_key_cache: true
244307
308# Throttles all outbound streaming file transfers on this node to the
309# given total throughput in Mbps. This is necessary because Cassandra does
310# mostly sequential IO when streaming data during bootstrap or repair, which
311# can lead to saturating the network connection and degrading rpc performance.
312# When unset, the default is 400 Mbps or 50 MB/s.
313# stream_throughput_outbound_megabits_per_sec: 400
314
245# Time to wait for a reply from other nodes before failing the command 315# Time to wait for a reply from other nodes before failing the command
246rpc_timeout_in_ms: 10000316rpc_timeout_in_ms: 10000
247317
@@ -265,11 +335,6 @@
265# explicitly configured in cassandra-topology.properties.335# explicitly configured in cassandra-topology.properties.
266endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch336endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
267337
268# dynamic_snitch -- This boolean controls whether the above snitch is
269# wrapped with a dynamic snitch, which will monitor read latencies
270# and avoid reading from hosts that have slowed (due to compaction,
271# for instance)
272dynamic_snitch: true
273# controls how often to perform the more expensive part of host score338# controls how often to perform the more expensive part of host score
274# calculation339# calculation
275dynamic_snitch_update_interval_in_ms: 100 340dynamic_snitch_update_interval_in_ms: 100
@@ -283,7 +348,7 @@
283# expressed as a double which represents a percentage. Thus, a value of348# expressed as a double which represents a percentage. Thus, a value of
284# 0.2 means Cassandra would continue to prefer the static snitch values349# 0.2 means Cassandra would continue to prefer the static snitch values
285# until the pinned host was 20% worse than the fastest.350# until the pinned host was 20% worse than the fastest.
286dynamic_snitch_badness_threshold: 0.0351dynamic_snitch_badness_threshold: 0.1
287352
288# request_scheduler -- Set this to a class that implements353# request_scheduler -- Set this to a class that implements
289# RequestScheduler, which will schedule incoming client requests354# RequestScheduler, which will schedule incoming client requests
@@ -325,32 +390,40 @@
325# the request scheduling. Currently the only valid option is keyspace.390# the request scheduling. Currently the only valid option is keyspace.
326# request_scheduler_id: keyspace391# request_scheduler_id: keyspace
327392
328# The Index Interval determines how large the sampling of row keys393# index_interval controls the sampling of entries from the primrary
329# is for a given SSTable. The larger the sampling, the more effective394# row index in terms of space versus time. The larger the interval,
330# the index is at the cost of space.395# the smaller and less effective the sampling will be. In technicial
396# terms, the interval coresponds to the number of index entries that
397# are skipped between taking each sample. All the sampled entries
398# must fit in memory. Generally, a value between 128 and 512 here
399# coupled with a large key cache size on CFs results in the best trade
400# offs. This value is not often changed, however if you have many
401# very small rows (many to an OS page), then increasing this will
402# often lower memory usage without a impact on performance.
331index_interval: 128403index_interval: 128
332404
333405# Enable or disable inter-node encryption
334#keyspaces:406# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
335#- column_families:407# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
336# - column_type: Standard408# suite for authentication, key exchange and encryption of the actual data transfers.
337# name: Standard1409# NOTE: No custom encryption options are enabled at the moment
338# - column_type: Standard410# The available internode options are : all, none, dc, rack
339# name: Standard2411#
340# - column_type: Standard412# If set to dc cassandra will encrypt the traffic between the DCs
341# compare_with: org.apache.cassandra.db.marshal.TimeUUIDType413# If set to rack cassandra will encrypt the traffic between the racks
342# name: StandardByTime1414#
343# - column_type: Standard415# The passwords used in these options must match the passwords used when generating
344# compare_with: org.apache.cassandra.db.marshal.TimeUUIDType416# the keystore and truststore. For instructions on generating these files, see:
345# name: StandardByTime2417# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
346# - column_type: Super418#
347# name: Super1419encryption_options:
348# - column_type: Super420 internode_encryption: none
349# name: Super2421 keystore: conf/.keystore
350# 422 keystore_password: cassandra
351# name: Keyspace1423 truststore: conf/.truststore
352# replica_placement_strategy: org.apache.cassandra.locator.SimpleStrategy424 truststore_password: cassandra
353# replica_placement_factor: REPLICATION425 # More advanced defaults below:
354#426 # protocol: TLS
355#427 # algorithm: SunX509
356428 # store_type: JKS
429 # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
357430
=== modified file 'AppDB/cassandra/test_cassandra.py' (properties changed: +x to -x)
--- AppDB/cassandra/test_cassandra.py 2011-05-29 21:41:44 +0000
+++ AppDB/cassandra/test_cassandra.py 2012-02-16 06:02:21 +0000
@@ -1,286 +1,89 @@
1#1import py_cassandra
2# Cassandra Interface for AppScale2
3# Rewritten by Navraj Chohan for using range queries3py_cassandra = py_cassandra.DatastoreProxy()
4# Modified by Chris Bunch for upgrade to Cassandra 0.50.04
5# on 2/17/105columns = ["a","b","c"]
6# Original author: suwanny@gmail.com6data = ["1","2","3"]
77table_name = "hello"
8import os,sys8key = "1"
9import time9print "key= " + key
1010print "columns= " + str(columns)
11from thrift_cass.Cassandra import Client11print "data= " + str(data)
12from thrift_cass.ttypes import *12print "table= " + table_name
1313#print py_cassandra.put_entity("__hi__", key, columns, data)
14import string14#print py_cassandra.put_entity("__hah__", key, columns, data)
15import base64 # base64 2009.04.1615#exit(0)
16from dbconstants import *16print py_cassandra.put_entity(table_name, key, columns, data)
17from dbinterface import *17ret = py_cassandra.get_entity(table_name, key, columns)
18#import sqlalchemy.pool as pool18print "doing a put then get"
19import appscale_logger19print ret
20import pycassa20if ret[1:] != data:
21from thrift import Thrift21 print "ERROR doing a put then get. Data does not match"
22from thrift.transport import TSocket22 print "returned: " + str(ret)
23from thrift.transport import TTransport23 print "expected: " + str(data)
24from thrift.protocol import TBinaryProtocol24 exit(1)
25ERROR_DEFAULT = "DB_ERROR:" # ERROR_CASSANDRA25else:
26# Store all schema information in a special table26 print "Success"
27# If a table does not show up in this table, try a range query 27
28# to discover it's schema28ret = py_cassandra.get_schema("hello")
29SCHEMA_TABLE = "__key__"29print ret
30SCHEMA_TABLE_SCHEMA = ['schema']30print "checking schema:"
31# use 1 Table and 1 ColumnFamily in Cassandra31print ret
32MAIN_TABLE = "Keyspace1"32if ret[1:] != columns:
33COLUMN_FAMILY = "Standard1"33 print "ERROR in recieved schema"
3434 print "returned: " + str(ret)
35PERSISTENT_CONNECTION = False35 print "expected: " + str(columns)
36PROFILING = False36
3737ret = py_cassandra.delete_row(table_name, key)
38DEFAULT_HOST = "localhost"38print "Deleting the key %s"%key
39DEFAULT_PORT = 916039print ret
4040
41CONSISTENCY_ZERO = 0 # don't use this for reads41ret = py_cassandra.get_entity(table_name, key, columns)
42CONSISTENCY_ONE = 142print "Trying to get deleted key:"
43CONSISTENCY_QUORUM = 243print ret
44CONSISTENCY_ALL = 5 # don't use this for reads (next version may fix this)44print "doing a put with key %s"%key
4545print py_cassandra.put_entity("hello", "1", ["a","b","c"], ["1","2","3"])
46MAX_ROW_COUNT = 1000000046print "doing a get table"
47table_cache = {}47print py_cassandra.get_table("hello", ["a","b","c"])
48class DatastoreProxy(AppDBInterface):48py_cassandra.put_entity("hello", "2", ["a","b","c"], ["4","5","6"])
49 def __init__(self, logger = appscale_logger.getLogger("datastore-cassandra")):49print "doing get table:"
50 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')50print py_cassandra.get_table("hello", ["a","b","c"])
51 self.host = DEFAULT_HOST51py_cassandra.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
52 self.port = DEFAULT_PORT52py_cassandra.get_table("hello", ["a","b","c"])
53 #self.pool = pool.QueuePool(self.__create_connection, reset_on_return=False)53
54 #connection.add_pool('AppScale', [self.host, self.port])54print "TRYING TO REPLACE KEY 3"
55 self.pool = pycassa.ConnectionPool(keyspace='Keyspace1', server_list=[self.host+":"+str(self.port)], prefill=False)55py_cassandra.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
56 self.logger = logger56print "TRYING TO REPLACE KEY 3"
5757py_cassandra.get_table("hello", ["a","b","c"])
58 def logTiming(self, function, start_time, end_time):58print "TRYING TO REPLACE KEY 3"
59 if PROFILING:59ret = py_cassandra.delete_row("hello", "1")
60 self.logger.debug(function + ": " + str(end_time - start_time) + " s")60print "TRYING TO REPLACE KEY 3"
61 61ret = py_cassandra.delete_row("hello", "2")
62 def get_entity(self, table_name, row_key, column_names):62print "TRYING TO REPLACE KEY 3"
63 error = [ERROR_DEFAULT]63ret = py_cassandra.delete_row("hello", "3")
64 list = error64print "TRYING TO REPLACE KEY 3"
65 client = None65py_cassandra.get_table("hello", ["a","b","c"])
66 row_key = table_name + '/' + row_key66print "Deleting table:"
67 try: 67print py_cassandra.delete_table("hello")
68 slice_predicate = SlicePredicate(column_names=column_names)68print "deleting twice:"
69 path = ColumnPath(COLUMN_FAMILY)69print py_cassandra.delete_table("hello")
70 client = self.__setup_connection()70
71 # Result is a column type which has name, value, timestamp71table_name = u"testing_query"
72 result = client.get_slice(row_key, path, slice_predicate,72print py_cassandra.delete_table(table_name)
73 CONSISTENCY_QUORUM) 73column_names = [u"c1"]
74 for column in column_names:74limit = 1000
75 for r in result:75offset = 0
76 c = r.column76key = 0
77 if column == c.name:77startrow = u"000"
78 list.append(c.value)78endrow = u"100"
79 except NotFoundException: # occurs normally if the item isn't in the db 79data = u"xxx"
80 list[0] += "Not found"80for ii in range(0, 101):
81 self.__close_connection(client)81 key = str(ii)
82 return list82 key = ("0" * (3 - len(key))) + key
83 except Exception, ex:83 key = unicode(key)
84 #self.logger.debug("Exception %s" % ex)84 print "Adding key " + key
85 list[0]+=("Exception: %s"%ex)85 print py_cassandra.put_entity(table_name, key, column_names, [data + key])
86 self.__close_connection(client)86inclusive = 1
87 return list87notJustKeys = 0
88 self.__close_connection(client)88print "SUCCESS"
89 if len(list) == 1:89
90 list[0] += "Not found"
91 return list
92
93
94 def put_entity(self, table_name, row_key, column_names, cell_values):
95 error = [ERROR_DEFAULT]
96 list = error
97 client = None
98
99 # The first time a table is seen
100 if table_name not in table_cache:
101 self.create_table(table_name, column_names)
102
103 row_key = table_name + '/' + row_key
104 client = self.__setup_connection()
105 curtime = self.timestamp()
106 # Result is a column type which has name, value, timestamp
107 mutations = []
108 for index, ii in enumerate(column_names):
109 column = Column(name = ii, value=cell_values[index],
110 timestamp=curtime)
111 c_or_sc = ColumnOrSuperColumn(column=column)
112 mutation = Mutation(column_or_supercolumn=c_or_sc)
113 mutations.append(mutation)
114 mutation_map = {row_key : { COLUMN_FAMILY : mutations } }
115 client.batch_mutate(mutation_map, CONSISTENCY_QUORUM)
116 """except Exception, ex:
117 print "EXCEPTION"
118 self.logger.debug("Exception %s" % ex)
119 list[0]+=("Exception: %s"%ex)
120 self.__close_connection(client)
121 list.append("0")
122 return list
123 """
124 self.__close_connection(client)
125 list.append("0")
126 return list
127
128 def put_entity_dict(self, table_name, row_key, value_dict):
129 raise NotImplementedError("put_entity_dict is not implemented in %s." % self.__class__)
130
131
132 def get_table(self, table_name, column_names):
133 error = [ERROR_DEFAULT]
134 client = None
135 result = error
136 keyslices = []
137 column_parent = ColumnParent(column_family="Standard1")
138 predicate = SlicePredicate(column_names=column_names)
139 start_key = table_name + "/"
140 end_key = table_name + '/~'
141 try:
142 client = self.__setup_connection()
143 keyslices = client.get_range_slice(column_parent,
144 predicate,
145 start_key,
146 end_key,
147 MAX_ROW_COUNT,
148 CONSISTENCY_QUORUM)
149 except Exception, ex:
150 self.logger.debug("Exception %s" % ex)
151 result[0] += "Exception: " + str(ex)
152 self.__close_connection(client)
153 return result
154 for keyslice in keyslices:
155 ordering_dict = {}
156 for c in keyslice.columns:
157 column = c.column
158 value = column.value
159 ordering_dict[column.name] = value
160 if len(ordering_dict) == 0:
161 continue
162 for column in column_names:
163 try:
164 result.append(ordering_dict[column])
165 except:
166 result[0] += "Key error, get_table did not return the correct schema"
167 self.__close_connection(client)
168 return result
169
170 def delete_row(self, table_name, row_key):
171 error = [ERROR_DEFAULT]
172 ret = error
173 client = None
174 row_key = table_name + '/' + row_key
175 path = ColumnPath(COLUMN_FAMILY)
176 try:
177 client = self.__setup_connection()
178 curtime = self.timestamp()
179 # Result is a column type which has name, value, timestamp
180 client.remove(row_key, path, curtime,
181 CONSISTENCY_QUORUM)
182 except Exception, ex:
183 self.logger.debug("Exception %s" % ex)
184 ret[0]+=("Exception: %s"%ex)
185 self.__close_connection(client)
186 return ret
187 self.__close_connection(client)
188 ret.append("0")
189 return ret
190
191 def get_schema(self, table_name):
192 error = [ERROR_DEFAULT]
193 result = error
194 ret = self.get_entity(SCHEMA_TABLE,
195 table_name,
196 SCHEMA_TABLE_SCHEMA)
197 if len(ret) > 1:
198 schema = ret[1]
199 else:
200 error[0] = ret[0] + "--unable to get schema"
201 return error
202 schema = schema.split(':')
203 result = result + schema
204 return result
205
206
207 def delete_table(self, table_name):
208 error = [ERROR_DEFAULT]
209 result = error
210 keyslices = []
211 column_parent = ColumnParent(column_family="Standard1")
212 predicate = SlicePredicate(column_names=[])
213 curtime = self.timestamp()
214 path = ColumnPath(COLUMN_FAMILY)
215 start_key = table_name + "/"
216 end_key = table_name + '/~'
217 try:
218 client = self.__setup_connection()
219 keyslices = client.get_range_slice(column_parent,
220 predicate,
221 start_key,
222 end_key,
223 MAX_ROW_COUNT,
224 CONSISTENCY_QUORUM)
225 except Exception, ex:
226 self.logger.debug("Exception %s" % ex)
227 result[0]+=("Exception: %s"%ex)
228 self.__close_connection(client)
229 return result
230 keys_removed = False
231 for keyslice in keyslices:
232 row_key = keyslice.key
233 client.remove( row_key,
234 path,
235 curtime,
236 CONSISTENCY_QUORUM)
237 keys_removed = True
238 if table_name not in table_cache and keys_removed:
239 result[0] += "Table does not exist"
240 return result
241 if table_name in table_cache:
242 del table_cache[table_name]
243
244 self.__close_connection(client)
245 return result
246
247 # Only stores the schema
248 def create_table(self, table_name, column_names):
249 table_cache[table_name] = 1
250 columns = ':'.join(column_names)
251 row_key = table_name
252 # Get and make sure we are not overwriting previous schemas
253 ret = self.get_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA)
254 if ret[0] != ERROR_DEFAULT:
255 self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])
256
257 ######################################################################
258 # private methods
259 ######################################################################
260 def __create_connection(self):
261 socket = TSocket.TSocket(self.host, self.port)
262 transport = TTransport.TBufferedTransport(socket)
263 protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
264 client = Client(protocol)
265 client.transport = transport
266 client.transport.open()
267 #keyspace = KsDef(name=MAIN_TABLE, strategy_class="org.pache.cassandra.locator.SimpleStrategy", replication_factor=1, cf_defs=cfams)
268 #client.system_add_keyspace(MAIN_TABLE)
269 client.set_keyspace(MAIN_TABLE)
270
271 return client
272
273 def __get_connection(self):
274 return connection.get_pool("AppScale")
275
276 def __setup_connection(self):
277 return self.pool.get()
278 #return connection.get_pool("AppScale")
279 #return self.pool.connect()
280
281 def __close_connection(self, client):
282 if client:
283 client.close()
284
285 def timestamp(self):
286 return int(time.time() * 1000)
28790
=== modified file 'AppDB/hadoop/hadoop_helper.rb'
--- AppDB/hadoop/hadoop_helper.rb 2010-12-09 21:17:29 +0000
+++ AppDB/hadoop/hadoop_helper.rb 2012-02-16 06:02:21 +0000
@@ -1,5 +1,5 @@
1require 'djinn'1require 'djinn'
2HADOOP_VER = "0.20.2"2HADOOP_VER = "0.20.2-cdh3u3"
3HADOOP_LOC = "#{APPSCALE_HOME}/AppDB/hadoop-" + HADOOP_VER 3HADOOP_LOC = "#{APPSCALE_HOME}/AppDB/hadoop-" + HADOOP_VER
4HDFS_PORT = 90004HDFS_PORT = 9000
5ENABLE_HADOOP_SINGLE_NODE = true5ENABLE_HADOOP_SINGLE_NODE = true
66
=== modified file 'AppDB/hadoop/patch/hadoop-hbase.patch'
--- AppDB/hadoop/patch/hadoop-hbase.patch 2010-04-08 00:25:36 +0000
+++ AppDB/hadoop/patch/hadoop-hbase.patch 2012-02-16 06:02:21 +0000
@@ -1,18 +1,20 @@
1*** src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java.org 2009-04-08 22:15:30.000000000 -07001*** src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java.org 2009-04-08 22:15:30.000000000 -0700
2--- src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java 2010-04-07 17:12:19.000000000 -07002--- src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java 2010-04-07 17:12:19.000000000 -0700
3***************3***************
4*** 20,26 ****4*** 20,27 ****
5 5
6 import java.io.*;6 import java.io.*;
7 import java.net.*;7 import java.net.*;
8 import java.util.ArrayList;
8! 9!
9 import org.apache.hadoop.fs.permission.FsPermission;10 import org.apache.hadoop.fs.permission.FsPermission;
10 import org.apache.hadoop.fs.*;11 import org.apache.hadoop.fs.*;
11 import org.apache.hadoop.conf.Configuration;12 import org.apache.hadoop.conf.Configuration;
12--- 20,26 ----13--- 20,27 ----
13 14
14 import java.io.*;15 import java.io.*;
15 import java.net.*;16 import java.net.*;
17 import java.util.ArrayList;
16! import org.apache.hadoop.util.*;18! import org.apache.hadoop.util.*;
17 import org.apache.hadoop.fs.permission.FsPermission;19 import org.apache.hadoop.fs.permission.FsPermission;
18 import org.apache.hadoop.fs.*;20 import org.apache.hadoop.fs.*;
1921
=== added file 'AppDB/hadoop/templates/hadoop'
--- AppDB/hadoop/templates/hadoop 1970-01-01 00:00:00 +0000
+++ AppDB/hadoop/templates/hadoop 2012-02-16 06:02:21 +0000
@@ -0,0 +1,468 @@
1#!/usr/bin/env bash
2# Licensed to the Apache Software Foundation (ASF) under one or more
3# contributor license agreements. See the NOTICE file distributed with
4# this work for additional information regarding copyright ownership.
5# The ASF licenses this file to You under the Apache License, Version 2.0
6# (the "License"); you may not use this file except in compliance with
7# the License. You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18# The Hadoop command script
19#
20# Environment Variables
21#
22# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
23#
24# HADOOP_CLASSPATH Extra Java CLASSPATH entries.
25#
26# HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
27# added in the beginning of the global
28# classpath. Can be defined, for example,
29# by doing
30# export HADOOP_USER_CLASSPATH_FIRST=true
31#
32# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
33# Default is 1000.
34#
35# HADOOP_OPTS Extra Java runtime options.
36#
37# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS
38# HADOOP_CLIENT_OPTS when the respective command is run.
39# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
40# for e.g. HADOOP_CLIENT_OPTS applies to
41# more than one command (fs, dfs, fsck,
42# dfsadmin etc)
43#
44# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
45#
46# HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
47#
48
49bin=`dirname "$0"`
50bin=`cd "$bin"; pwd`
51
52. "$bin"/hadoop-config.sh
53
54HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
55
56cygwin=false
57case "`uname`" in
58CYGWIN*) cygwin=true;;
59esac
60
61# if no args specified, show usage
62if [ $# = 0 ]; then
63 echo "Usage: hadoop [--config confdir] COMMAND"
64 echo "where COMMAND is one of:"
65 echo " namenode -format format the DFS filesystem"
66 echo " secondarynamenode run the DFS secondary namenode"
67 echo " namenode run the DFS namenode"
68 echo " datanode run a DFS datanode"
69 echo " dfsadmin run a DFS admin client"
70 echo " mradmin run a Map-Reduce admin client"
71 echo " fsck run a DFS filesystem checking utility"
72 echo " fs run a generic filesystem user client"
73 echo " balancer run a cluster balancing utility"
74 echo " fetchdt fetch a delegation token from the NameNode"
75 echo " jobtracker run the MapReduce job Tracker node"
76 echo " pipes run a Pipes job"
77 echo " tasktracker run a MapReduce task Tracker node"
78 echo " job manipulate MapReduce jobs"
79 echo " queue get information regarding JobQueues"
80 echo " version print the version"
81 echo " jar <jar> run a jar file"
82 echo " distcp <srcurl> <desturl> copy file or directories recursively"
83 echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
84 echo " oiv apply the offline fsimage viewer to an fsimage"
85 echo " classpath prints the class path needed to get the"
86 echo " dfsgroups get the groups which users belong to on the Name Node"
87 echo " mrgroups get the groups which users belong to on the Job Tracker"
88 echo " Hadoop jar and the required libraries"
89 echo " daemonlog get/set the log level for each daemon"
90 echo " or"
91 echo " CLASSNAME run the class named CLASSNAME"
92 echo "Most commands print help when invoked w/o parameters."
93 exit 1
94fi
95
96# get arguments
97COMMAND=$1
98shift
99
100if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
101 . "${HADOOP_CONF_DIR}/hadoop-env.sh"
102fi
103
104# some Java parameters
105if [ "$JAVA_HOME" != "" ]; then
106 #echo "run java in $JAVA_HOME"
107 JAVA_HOME=$JAVA_HOME
108fi
109
110if [ "$JAVA_HOME" = "" ]; then
111 echo "Error: JAVA_HOME is not set."
112 exit 1
113fi
114
115JAVA=$JAVA_HOME/bin/java
116JAVA_HEAP_MAX=-Xmx1000m
117
118# check envvars which might override default args
119if [ "$HADOOP_HEAPSIZE" != "" ]; then
120 #echo "run with heapsize $HADOOP_HEAPSIZE"
121 JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
122 #echo $JAVA_HEAP_MAX
123fi
124
125# CLASSPATH initially contains $HADOOP_CONF_DIR
126CLASSPATH="${HADOOP_CONF_DIR}"
127CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
128if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ] && [ "$HADOOP_CLASSPATH" != "" ] ; then
129 CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
130fi
131
132# for developers, add Hadoop classes to CLASSPATH
133if [ -d "$HADOOP_HOME/build/classes" ]; then
134 CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
135fi
136if [ -d "$HADOOP_HOME/build/webapps" ]; then
137 CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
138fi
139if [ -d "$HADOOP_HOME/build/test/classes" ]; then
140 CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
141fi
142if [ -d "$HADOOP_HOME/build/tools" ]; then
143 CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
144fi
145
146# so that filenames w/ spaces are handled correctly in loops below
147IFS=
148
149# for releases, add core hadoop jar & webapps to CLASSPATH
150if [ -d "$HADOOP_HOME/webapps" ]; then
151 CLASSPATH=${CLASSPATH}:$HADOOP_HOME
152fi
153for f in $HADOOP_HOME/hadoop-core-*.jar; do
154 CLASSPATH=${CLASSPATH}:$f;
155done
156
157# add libs to CLASSPATH
158for f in $HADOOP_HOME/lib/*.jar; do
159 CLASSPATH=${CLASSPATH}:$f;
160done
161
162if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
163for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
164 CLASSPATH=${CLASSPATH}:$f;
165done
166fi
167
168for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
169 CLASSPATH=${CLASSPATH}:$f;
170done
171
172for f in $HADOOP_HOME/hadoop-tools-*.jar; do
173 TOOL_PATH=${TOOL_PATH}:$f;
174done
175for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do
176 TOOL_PATH=${TOOL_PATH}:$f;
177done
178
179# add user-specified CLASSPATH last
180if [ "$HADOOP_USER_CLASSPATH_FIRST" = "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
181 CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
182fi
183
184# default log directory & file
185if [ "$HADOOP_LOG_DIR" = "" ]; then
186 HADOOP_LOG_DIR="$HADOOP_HOME/logs"
187fi
188if [ "$HADOOP_LOGFILE" = "" ]; then
189 HADOOP_LOGFILE='hadoop.log'
190fi
191
192# default policy file for service-level authorization
193if [ "$HADOOP_POLICYFILE" = "" ]; then
194 HADOOP_POLICYFILE="hadoop-policy.xml"
195fi
196
197# restore ordinary behaviour
198unset IFS
199
200# figure out which class to run
201if [ "$COMMAND" = "classpath" ] ; then
202 if $cygwin; then
203 CLASSPATH=`cygpath -p -w "$CLASSPATH"`
204 fi
205 echo $CLASSPATH
206 exit
207elif [ "$COMMAND" = "namenode" ] ; then
208 CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
209 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
210elif [ "$COMMAND" = "secondarynamenode" ] ; then
211 CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
212 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
213elif [ "$COMMAND" = "datanode" ] ; then
214 CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
215 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS"
216elif [ "$COMMAND" = "fs" ] ; then
217 CLASS=org.apache.hadoop.fs.FsShell
218 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
219elif [ "$COMMAND" = "dfs" ] ; then
220 CLASS=org.apache.hadoop.fs.FsShell
221 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
222elif [ "$COMMAND" = "dfsadmin" ] ; then
223 CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
224 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
225elif [ "$COMMAND" = "mradmin" ] ; then
226 CLASS=org.apache.hadoop.mapred.tools.MRAdmin
227 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
228elif [ "$COMMAND" = "fsck" ] ; then
229 CLASS=org.apache.hadoop.hdfs.tools.DFSck
230 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
231elif [ "$COMMAND" = "balancer" ] ; then
232 CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
233 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
234elif [ "$COMMAND" = "fetchdt" ] ; then
235 CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
236elif [ "$COMMAND" = "jobtracker" ] ; then
237 CLASS=org.apache.hadoop.mapred.JobTracker
238 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS"
239elif [ "$COMMAND" = "tasktracker" ] ; then
240 CLASS=org.apache.hadoop.mapred.TaskTracker
241 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS"
242elif [ "$COMMAND" = "job" ] ; then
243 CLASS=org.apache.hadoop.mapred.JobClient
244 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
245elif [ "$COMMAND" = "queue" ] ; then
246 CLASS=org.apache.hadoop.mapred.JobQueueClient
247 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
248elif [ "$COMMAND" = "pipes" ] ; then
249 CLASS=org.apache.hadoop.mapred.pipes.Submitter
250 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
251elif [ "$COMMAND" = "version" ] ; then
252 CLASS=org.apache.hadoop.util.VersionInfo
253 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
254elif [ "$COMMAND" = "jar" ] ; then
255 CLASS=org.apache.hadoop.util.RunJar
256 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
257elif [ "$COMMAND" = "distcp" ] ; then
258 CLASS=org.apache.hadoop.tools.DistCp
259 CLASSPATH=${CLASSPATH}:${TOOL_PATH}
260 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
261elif [ "$COMMAND" = "daemonlog" ] ; then
262 CLASS=org.apache.hadoop.log.LogLevel
263 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
264elif [ "$COMMAND" = "archive" ] ; then
265 CLASS=org.apache.hadoop.tools.HadoopArchives
266 CLASSPATH=${CLASSPATH}:${TOOL_PATH}
267 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
268elif [ "$COMMAND" = "oiv" ] ; then
269 CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
270 CLASSPATH=${CLASSPATH}:${TOOL_PATH}
271 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
272elif [ "$COMMAND" = "sampler" ] ; then
273 CLASS=org.apache.hadoop.mapred.lib.InputSampler
274 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
275elif [ "$COMMAND" = "dfsgroups" ] ; then
276 CLASS=org.apache.hadoop.hdfs.tools.GetGroups
277 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
278elif [ "$COMMAND" = "mrgroups" ] ; then
279 CLASS=org.apache.hadoop.mapred.tools.GetGroups
280 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
281elif [[ "$COMMAND" = -* ]] ; then
282 # class and package names cannot begin with a -
283 echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
284 exit 1
285else
286 CLASS=$COMMAND
287 HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
288fi
289
290# cygwin path translation
291if $cygwin; then
292 CLASSPATH=`cygpath -p -w "$CLASSPATH"`
293 HADOOP_HOME=`cygpath -w "$HADOOP_HOME"`
294 HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
295 TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
296 JAVA_LIBRARY_PATH=`cygpath -p -w "$JAVA_LIBRARY_PATH"`
297fi
298
299# setup 'java.library.path' for native-hadoop code if necessary
300if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" -o -d "${HADOOP_HOME}/sbin" ]; then
301 JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
302
303 if [ -d "$HADOOP_HOME/build/native" ]; then
304 if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
305 JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
306 else
307 JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
308 fi
309 fi
310
311 if [ -d "${HADOOP_HOME}/lib/native" ]; then
312 if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
313 JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
314 else
315 JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
316 fi
317 fi
318
319 _JSVC_PATH=${HADOOP_HOME}/sbin/${JAVA_PLATFORM}/jsvc
320fi
321
322# cygwin path translation
323if $cygwin; then
324 JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
325fi
326
327HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
328HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
329HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
330HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
331HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
332if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
333 HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
334fi
335HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
336
337
338###########################################################################
339# DAEMON SETTINGS
340###########################################################################
341# For any command that ends in 'node', we are starting one of the daemons.
342# In this case, we do some special processing in order to automatically
343# setuid to the correct user.
344#
345# The user itself is determined as one of the following, in descending
346# precedence:
347# HADOOP_<node>NODE_USER variable
348# the current userid, so long as that userid is not root
349#
350# After the above is determined, it is stored into the local variable
351# _HADOOP_DAEMON_USER
352#
353# We also need to determine the "run mode". This can be one of the following:
354#
355# "jsvc" - only supported for the datanode - we use the jsvc wrapper in
356# the sbin/<platform> directory in order to setuid to the target
357# user. Requires that this script is running as root.
358# "su" - supported only when running as root and /bin/su exists.
359# Uses su in order to assume the identity of the daemon user.
360# "normal" - supported only when already running as the target user.
361###########################################################################
362if [[ "$COMMAND" == *node ]] || [[ "$COMMAND" == *tracker ]]; then
363 command_uc=$(echo $COMMAND| tr a-z A-Z)
364 user_var="HADOOP_${command_uc}_USER"
365 _HADOOP_DAEMON_USER=$(eval "echo \$$user_var")
366 _HADOOP_DAEMON_USER=${_HADOOP_DAEMON_USER:-$(id -un)}
367
368 if [ -z "$_HADOOP_DAEMON_USER" ]; then
369 echo Please specify a user to run the $COMMAND by setting $user_var
370 #exit 1
371 elif [ "$_HADOOP_DAEMON_USER" == "root" ]; then
372 echo May not run daemons as root. Please specify $user_var
373 #exit 1
374 fi
375
376 if [ "$EUID" = "0" ] ; then
377 if [ "$COMMAND" == "datanode" ] && [ -x "$_JSVC_PATH" ]; then
378 _HADOOP_RUN_MODE="jsvc"
379 elif [ -x /bin/su ]; then
380 _HADOOP_RUN_MODE="su"
381 else
382 echo "Daemon wants to run as $_HADOOP_DAEMON_USER but script is running as root"
383 echo "and su is not available."
384 #exit 1
385 fi
386 else
387 # We must be running as the user we want to run as, if we can't use jsvc or su
388 # to drop privileges
389 if [ "$_HADOOP_DAEMON_USER" != "$(whoami)" ]; then
390 echo Daemon wants to run as $_HADOOP_DAEMON_USER but not running as that user or root.
391 #exit 1
392 fi
393 _HADOOP_RUN_MODE="normal"
394 fi
395else
396 # Normal client command
397 _HADOOP_RUN_MODE="normal"
398fi
399
400###########################################################################
401# Actually run the JVM
402###########################################################################
403case "$_HADOOP_RUN_MODE" in
404 jsvc)
405 case "$COMMAND" in
406 datanode)
407 _JSVC_STARTER_CLASS=org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter
408 ;;
409 *)
410 echo "Cannot start $COMMAND with jsvc"
411 exit 1
412 ;;
413 esac
414
415 if [ "$_HADOOP_DAEMON_DETACHED" = "true" ]; then
416 _JSVC_FLAGS="-pidfile $_HADOOP_DAEMON_PIDFILE
417 -errfile &1
418 -outfile $_HADOOP_DAEMON_OUT"
419 else
420 # Even though we are trying to run a non-detached datanode,
421 # jsvc will not write to stdout/stderr, so we have to pipe
422 # it and tail the logfile.
423 _JSVC_FLAGS="-nodetach
424 -errfile &1
425 -outfile $HADOOP_LOG_DIR/jsvc.out"
426 echo Non-detached jsvc output piping to: $HADOOP_LOG_DIR/jsvc.out
427 touch $HADOOP_LOG_DIR/jsvc.out
428 tail -f $HADOOP_LOG_DIR/jsvc.out &
429 fi
430 unset _HADOOP_DAEMON_DETACHED
431
432 exec "$_JSVC_PATH" -Dproc_$COMMAND \
433 $_JSVC_FLAGS \
434 -user "$_HADOOP_DAEMON_USER" \
435 -cp "$CLASSPATH" \
436 $JAVA_HEAP_MAX $HADOOP_OPTS \
437 $_JSVC_STARTER_CLASS "$@"
438 ;;
439
440 normal | su)
441 # If we need to su, tack the command into a local variable
442 if [ $_HADOOP_RUN_MODE = "su" ]; then
443 _JAVA_EXEC="su $_HADOOP_DAEMON_USER -s $JAVA --"
444 else
445 _JAVA_EXEC="$JAVA"
446 fi
447
448 if [ "$_HADOOP_DAEMON_DETACHED" = "true" ]; then
449 unset _HADOOP_DAEMON_DETACHED
450 touch $_HADOOP_DAEMON_OUT
451 nohup $_JAVA_EXEC -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" > "$_HADOOP_DAEMON_OUT" 2>&1 < /dev/null &
452 if [ "$EUID" == "0" ]; then
453 chown $_HADOOP_DAEMON_USER $_HADOOP_DAEMON_OUT
454 fi
455 echo $! > "$_HADOOP_DAEMON_PIDFILE"
456 sleep 1
457 head "$_HADOOP_DAEMON_OUT"
458 else
459 # For normal operation, just run the command
460 exec $_JAVA_EXEC -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
461 fi
462 ;;
463
464 *)
465 echo Bad run mode: $_HADOOP_RUN_MODE
466 exit 1
467 ;;
468esac
0469
=== modified file 'AppDB/hbase/hbase-status.sh'
--- AppDB/hbase/hbase-status.sh 2010-11-18 09:50:11 +0000
+++ AppDB/hbase/hbase-status.sh 2012-02-16 06:02:21 +0000
@@ -1,6 +1,6 @@
1#!/bin/sh1#!/bin/sh
22
3val=`echo "status 'summary'" | ${APPSCALE_HOME}/AppDB/hbase/hbase-0.89.20100924/bin/hbase shell | mawk '/load$/{print $1}'`3val=`echo "status 'summary'" | ${APPSCALE_HOME}/AppDB/hbase/hbase-0.90.4-cdh3u3/bin/hbase shell | mawk '/load$/{print $1}'`
4if [ -z "$val" ]; then4if [ -z "$val" ]; then
5 echo "0"5 echo "0"
6else6else
77
=== modified file 'AppDB/hbase/hbase_helper.rb'
--- AppDB/hbase/hbase_helper.rb 2010-12-09 21:17:29 +0000
+++ AppDB/hbase/hbase_helper.rb 2012-02-16 06:02:21 +0000
@@ -3,7 +3,7 @@
3require 'helperfunctions'3require 'helperfunctions'
4require "#{APPSCALE_HOME}/AppDB/hadoop/hadoop_helper"4require "#{APPSCALE_HOME}/AppDB/hadoop/hadoop_helper"
55
6HBASE_LOC = "#{APPSCALE_HOME}/AppDB/hbase/hbase-0.89.20100924"6HBASE_LOC = "#{APPSCALE_HOME}/AppDB/hbase/hbase-0.90.4-cdh3u3"
7THRIFT_PORT = 90907THRIFT_PORT = 9090
8MASTER_SERVER_PORT = 600008MASTER_SERVER_PORT = 60000
9ENABLE_SINGLE_NODE = true9ENABLE_SINGLE_NODE = true
1010
=== added file 'AppDB/hbase/patch/HMaster.java'
--- AppDB/hbase/patch/HMaster.java 1970-01-01 00:00:00 +0000
+++ AppDB/hbase/patch/HMaster.java 2012-02-16 06:02:21 +0000
@@ -0,0 +1,1171 @@
1/**
2 * Copyright 2011 The Apache Software Foundation
3 *
4 * Licensed to the Apache Software Foundation (ASF) under one
5 * or more contributor license agreements. See the NOTICE file
6 * distributed with this work for additional information
7 * regarding copyright ownership. The ASF licenses this file
8 * to you under the Apache License, Version 2.0 (the
9 * "License"); you may not use this file except in compliance
10 * with the License. You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20package org.apache.hadoop.hbase.master;
21
22import java.io.IOException;
23import java.lang.reflect.Constructor;
24import java.lang.reflect.InvocationTargetException;
25import java.net.InetSocketAddress;
26import java.net.UnknownHostException;
27import java.util.ArrayList;
28import java.util.HashMap;
29import java.util.List;
30import java.util.Map;
31import java.util.concurrent.atomic.AtomicReference;
32
33import org.apache.commons.logging.Log;
34import org.apache.commons.logging.LogFactory;
35import org.apache.hadoop.conf.Configuration;
36import org.apache.hadoop.hbase.Chore;
37import org.apache.hadoop.hbase.ClusterStatus;
38import org.apache.hadoop.hbase.HColumnDescriptor;
39import org.apache.hadoop.hbase.HConstants;
40import org.apache.hadoop.hbase.HMsg;
41import org.apache.hadoop.hbase.HRegionInfo;
42import org.apache.hadoop.hbase.HServerAddress;
43import org.apache.hadoop.hbase.HServerInfo;
44import org.apache.hadoop.hbase.HTableDescriptor;
45import org.apache.hadoop.hbase.MasterNotRunningException;
46import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
47import org.apache.hadoop.hbase.Server;
48import org.apache.hadoop.hbase.TableExistsException;
49import org.apache.hadoop.hbase.TableNotDisabledException;
50import org.apache.hadoop.hbase.TableNotFoundException;
51import org.apache.hadoop.hbase.UnknownRegionException;
52import org.apache.hadoop.hbase.catalog.CatalogTracker;
53import org.apache.hadoop.hbase.catalog.MetaEditor;
54import org.apache.hadoop.hbase.catalog.MetaReader;
55import org.apache.hadoop.hbase.client.MetaScanner;
56import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
57import org.apache.hadoop.hbase.client.Result;
58import org.apache.hadoop.hbase.executor.ExecutorService;
59import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
60import org.apache.hadoop.hbase.ipc.HBaseRPC;
61import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
62import org.apache.hadoop.hbase.ipc.HBaseServer;
63import org.apache.hadoop.hbase.ipc.HMasterInterface;
64import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
65import org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan;
66import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
67import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
68import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
69import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
70import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
71import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
72import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
73import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
74import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
75import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
76import org.apache.hadoop.hbase.monitoring.MonitoredTask;
77import org.apache.hadoop.hbase.monitoring.TaskMonitor;
78import org.apache.hadoop.hbase.regionserver.HRegion;
79import org.apache.hadoop.hbase.regionserver.wal.HLog;
80import org.apache.hadoop.hbase.replication.regionserver.Replication;
81import org.apache.hadoop.hbase.security.User;
82import org.apache.hadoop.hbase.util.Bytes;
83import org.apache.hadoop.hbase.util.InfoServer;
84import org.apache.hadoop.hbase.util.Pair;
85import org.apache.hadoop.hbase.util.Sleeper;
86import org.apache.hadoop.hbase.util.Strings;
87import org.apache.hadoop.hbase.util.Threads;
88import org.apache.hadoop.hbase.util.VersionInfo;
89import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
90import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
91import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
92import org.apache.hadoop.io.MapWritable;
93import org.apache.hadoop.io.Text;
94import org.apache.hadoop.net.DNS;
95import org.apache.zookeeper.KeeperException;
96import org.apache.zookeeper.Watcher;
97
98/**
99 * HMaster is the "master server" for HBase. An HBase cluster has one active
100 * master. If many masters are started, all compete. Whichever wins goes on to
101 * run the cluster. All others park themselves in their constructor until
102 * master or cluster shutdown or until the active master loses its lease in
103 * zookeeper. Thereafter, all running master jostle to take over master role.
104 *
105 * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}. In
106 * this case it will tell all regionservers to go down and then wait on them
107 * all reporting in that they are down. This master will then shut itself down.
108 *
109 * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
110 *
111 * @see HMasterInterface
112 * @see HMasterRegionInterface
113 * @see Watcher
114 */
115public class HMaster extends Thread
116implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
117 private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
118
119 // MASTER is name of the webapp and the attribute name used stuffing this
120 //instance into web context.
121 public static final String MASTER = "master";
122
123 // The configuration for the Master
124 private final Configuration conf;
125 // server for the web ui
126 private InfoServer infoServer;
127
128 // Our zk client.
129 private ZooKeeperWatcher zooKeeper;
130 // Manager and zk listener for master election
131 private ActiveMasterManager activeMasterManager;
132 // Region server tracker
133 private RegionServerTracker regionServerTracker;
134
135 // RPC server for the HMaster
136 private final HBaseServer rpcServer;
137 // Address of the HMaster
138 private final HServerAddress address;
139 // Metrics for the HMaster
140 private final MasterMetrics metrics;
141 // file system manager for the master FS operations
142 private MasterFileSystem fileSystemManager;
143
144 // server manager to deal with region server info
145 private ServerManager serverManager;
146
147 // manager of assignment nodes in zookeeper
148 AssignmentManager assignmentManager;
149 // manager of catalog regions
150 private CatalogTracker catalogTracker;
151 // Cluster status zk tracker and local setter
152 private ClusterStatusTracker clusterStatusTracker;
153
154 // buffer for "fatal error" notices from region servers
155 // in the cluster. This is only used for assisting
156 // operations/debugging.
157 private MemoryBoundedLogMessageBuffer rsFatals;
158
159 // This flag is for stopping this Master instance. Its set when we are
160 // stopping or aborting
161 private volatile boolean stopped = false;
162 // Set on abort -- usually failure of our zk session.
163 private volatile boolean abort = false;
164 // flag set after we become the active master (used for testing)
165 private volatile boolean isActiveMaster = false;
166 // flag set after we complete initialization once active (used for testing)
167 private volatile boolean initialized = false;
168
169 // Instance of the hbase executor service.
170 ExecutorService executorService;
171
172 private LoadBalancer balancer;
173 private Thread balancerChore;
174 // If 'true', the balancer is 'on'. If 'false', the balancer will not run.
175 private volatile boolean balanceSwitch = true;
176
177 private Thread catalogJanitorChore;
178 private LogCleaner logCleaner;
179
180 /**
181 * Initializes the HMaster. The steps are as follows:
182 * <p>
183 * <ol>
184 * <li>Initialize HMaster RPC and address
185 * <li>Connect to ZooKeeper.
186 * </ol>
187 * <p>
188 * Remaining steps of initialization occur in {@link #run()} so that they
189 * run in their own thread rather than within the context of the constructor.
190 * @throws InterruptedException
191 */
192 public HMaster(final Configuration conf)
193 throws IOException, KeeperException, InterruptedException {
194 this.conf = conf;
195
196 /*
197 * Determine address and initialize RPC server (but do not start).
198 * The RPC server ports can be ephemeral. Create a ZKW instance.
199 */
200 HServerAddress a = new HServerAddress(getMyAddress(this.conf));
201 int numHandlers = conf.getInt("hbase.regionserver.handler.count", 10);
202 this.rpcServer = HBaseRPC.getServer(this,
203 new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
204 a.getBindAddress(), a.getPort(),
205 numHandlers,
206 0, // we dont use high priority handlers in master
207 false, conf,
208 0); // this is a DNC w/o high priority handlers
209 this.address = new HServerAddress(rpcServer.getListenerAddress());
210
211 // initialize server principal (if using secure Hadoop)
212 User.login(conf, "hbase.master.keytab.file",
213 "hbase.master.kerberos.principal", this.address.getHostname());
214
215 // set the thread name now we have an address
216 setName(MASTER + "-" + this.address);
217
218 Replication.decorateMasterConfiguration(this.conf);
219
220 this.rpcServer.startThreads();
221
222 // Hack! Maps DFSClient => Master for logs. HDFS made this
223 // config param for task trackers, but we can piggyback off of it.
224 if (this.conf.get("mapred.task.id") == null) {
225 this.conf.set("mapred.task.id", "hb_m_" + this.address.toString() +
226 "_" + System.currentTimeMillis());
227 }
228
229 this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" +
230 address.getPort(), this);
231
232 this.metrics = new MasterMetrics(getServerName());
233 }
234
235 /**
236 * Stall startup if we are designated a backup master; i.e. we want someone
237 * else to become the master before proceeding.
238 * @param c
239 * @param amm
240 * @throws InterruptedException
241 */
242 private static void stallIfBackupMaster(final Configuration c,
243 final ActiveMasterManager amm)
244 throws InterruptedException {
245 // If we're a backup master, stall until a primary to writes his address
246 if (!c.getBoolean(HConstants.MASTER_TYPE_BACKUP,
247 HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
248 return;
249 }
250 LOG.debug("HMaster started in backup mode. " +
251 "Stalling until master znode is written.");
252 // This will only be a minute or so while the cluster starts up,
253 // so don't worry about setting watches on the parent znode
254 while (!amm.isActiveMaster()) {
255 LOG.debug("Waiting for master address ZNode to be written " +
256 "(Also watching cluster state node)");
257 Thread.sleep(c.getInt("zookeeper.session.timeout", 180 * 1000));
258 }
259 }
260
261 /**
262 * Main processing loop for the HMaster.
263 * <ol>
264 * <li>Block until becoming active master
265 * <li>Finish initialization via {@link #finishInitialization()}
266 * <li>Enter loop until we are stopped
267 * <li>Stop services and perform cleanup once stopped
268 * </ol>
269 */
270 @Override
271 public void run() {
272 MonitoredTask startupStatus =
273 TaskMonitor.get().createStatus("Master startup");
274 startupStatus.setDescription("Master startup");
275 try {
276 /*
277 * Block on becoming the active master.
278 *
279 * We race with other masters to write our address into ZooKeeper. If we
280 * succeed, we are the primary/active master and finish initialization.
281 *
282 * If we do not succeed, there is another active master and we should
283 * now wait until it dies to try and become the next active master. If we
284 * do not succeed on our first attempt, this is no longer a cluster startup.
285 */
286 this.activeMasterManager = new ActiveMasterManager(zooKeeper, address,
287 this);
288 this.zooKeeper.registerListener(activeMasterManager);
289 stallIfBackupMaster(this.conf, this.activeMasterManager);
290 this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus);
291 // We are either the active master or we were asked to shutdown
292 if (!this.stopped) {
293 finishInitialization(startupStatus);
294 loop();
295 }
296 } catch (Throwable t) {
297 abort("Unhandled exception. Starting shutdown.", t);
298 } finally {
299 startupStatus.cleanup();
300
301 stopChores();
302 // Wait for all the remaining region servers to report in IFF we were
303 // running a cluster shutdown AND we were NOT aborting.
304 if (!this.abort && this.serverManager != null &&
305 this.serverManager.isClusterShutdown()) {
306 this.serverManager.letRegionServersShutdown();
307 }
308 stopServiceThreads();
309 // Stop services started for both backup and active masters
310 if (this.activeMasterManager != null) this.activeMasterManager.stop();
311 if (this.catalogTracker != null) this.catalogTracker.stop();
312 if (this.serverManager != null) this.serverManager.stop();
313 if (this.assignmentManager != null) this.assignmentManager.stop();
314 if (this.fileSystemManager != null) this.fileSystemManager.stop();
315 this.zooKeeper.close();
316 }
317 LOG.info("HMaster main thread exiting");
318 }
319
320 private void loop() {
321 // Check if we should stop every second.
322 Sleeper sleeper = new Sleeper(1000, this);
323 while (!this.stopped) {
324 sleeper.sleep();
325 }
326 }
327
328 /**
329 * Finish initialization of HMaster after becoming the primary master.
330 *
331 * <ol>
332 * <li>Initialize master components - file system manager, server manager,
333 * assignment manager, region server tracker, catalog tracker, etc</li>
334 * <li>Start necessary service threads - rpc server, info server,
335 * executor services, etc</li>
336 * <li>Set cluster as UP in ZooKeeper</li>
337 * <li>Wait for RegionServers to check-in</li>
338 * <li>Split logs and perform data recovery, if necessary</li>
339 * <li>Ensure assignment of root and meta regions<li>
340 * <li>Handle either fresh cluster start or master failover</li>
341 * </ol>
342 *
343 * @throws IOException
344 * @throws InterruptedException
345 * @throws KeeperException
346 */
347 private void finishInitialization(MonitoredTask status)
348 throws IOException, InterruptedException, KeeperException {
349
350 isActiveMaster = true;
351
352 /*
353 * We are active master now... go initialize components we need to run.
354 * Note, there may be dross in zk from previous runs; it'll get addressed
355 * below after we determine if cluster startup or failover.
356 */
357
358 status.setStatus("Initializing Master file system");
359 // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
360 this.fileSystemManager = new MasterFileSystem(this, metrics);
361 this.executorService = new ExecutorService(getServerName());
362 this.rsFatals = new MemoryBoundedLogMessageBuffer(
363 conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
364
365 this.serverManager = new ServerManager(this, this, metrics);
366
367 status.setStatus("Initializing ZK system trackers");
368 this.catalogTracker = new CatalogTracker(this.zooKeeper, this.conf,
369 this, conf.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE));
370 this.catalogTracker.start();
371
372 this.assignmentManager = new AssignmentManager(this, serverManager,
373 this.catalogTracker, this.executorService);
374 this.balancer = new LoadBalancer(conf);
375 zooKeeper.registerListenerFirst(assignmentManager);
376
377 this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
378 this.serverManager);
379 this.regionServerTracker.start();
380
381 // Set the cluster as up. If new RSs, they'll be waiting on this before
382 // going ahead with their startup.
383 this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
384 this.clusterStatusTracker.start();
385 boolean wasUp = this.clusterStatusTracker.isClusterUp();
386 if (!wasUp) this.clusterStatusTracker.setClusterUp();
387
388 LOG.info("Server active/primary master; " + this.address +
389 ", sessionid=0x" +
390 Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()) +
391 ", cluster-up flag was=" + wasUp);
392
393 // start up all service threads.
394 status.setStatus("Initializing master service threads");
395 startServiceThreads();
396
397 // Wait for region servers to report in. Returns count of regions.
398 int regionCount = this.serverManager.waitForRegionServers(status);
399
400 // TODO: Should do this in background rather than block master startup
401 status.setStatus("Splitting logs after master startup");
402 this.fileSystemManager.
403 splitLogAfterStartup(this.serverManager.getOnlineServers());
404
405 // Make sure root and meta assigned before proceeding.
406 assignRootAndMeta(status);
407
408 // Is this fresh start with no regions assigned or are we a master joining
409 // an already-running cluster? If regionsCount == 0, then for sure a
410 // fresh start. TOOD: Be fancier. If regionsCount == 2, perhaps the
411 // 2 are .META. and -ROOT- and we should fall into the fresh startup
412 // branch below. For now, do processFailover.
413 if (regionCount == 0) {
414 LOG.info("Master startup proceeding: cluster startup");
415 this.assignmentManager.cleanoutUnassigned();
416 this.assignmentManager.assignAllUserRegions();
417 } else {
418 LOG.info("Master startup proceeding: master failover");
419 this.assignmentManager.processFailover();
420 }
421
422 // Fixing up missing daughters if any
423 status.setStatus("Fixing up missing daughters");
424 fixupDaughters(status);
425
426 // Start balancer and meta catalog janitor after meta and regions have
427 // been assigned.
428 status.setStatus("Starting balancer and catalog janitor");
429 this.balancerChore = getAndStartBalancerChore(this);
430 this.catalogJanitorChore =
431 Threads.setDaemonThreadRunning(new CatalogJanitor(this, this));
432
433 status.markComplete("Initialization successful");
434 LOG.info("Master has completed initialization");
435 initialized = true;
436 }
437
438 /**
439 * Check <code>-ROOT-</code> and <code>.META.</code> are assigned. If not,
440 * assign them.
441 * @throws InterruptedException
442 * @throws IOException
443 * @throws KeeperException
444 * @return Count of regions we assigned.
445 */
446 int assignRootAndMeta(MonitoredTask status)
447 throws InterruptedException, IOException, KeeperException {
448 int assigned = 0;
449 long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
450
451 // Work on ROOT region. Is it in zk in transition?
452 status.setStatus("Assigning ROOT region");
453 boolean rit = this.assignmentManager.
454 processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.ROOT_REGIONINFO);
455 if (!catalogTracker.verifyRootRegionLocation(timeout)) {
456 this.assignmentManager.assignRoot();
457 this.catalogTracker.waitForRoot();
458 assigned++;
459 }
460 LOG.info("-ROOT- assigned=" + assigned + ", rit=" + rit +
461 ", location=" + catalogTracker.getRootLocation());
462
463 // Work on meta region
464 status.setStatus("Assigning META region");
465 rit = this.assignmentManager.
466 processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
467 if (!this.catalogTracker.verifyMetaRegionLocation(timeout)) {
468 this.assignmentManager.assignMeta();
469 this.catalogTracker.waitForMeta();
470 // Above check waits for general meta availability but this does not
471 // guarantee that the transition has completed
472 this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
473 assigned++;
474 }
475 LOG.info(".META. assigned=" + assigned + ", rit=" + rit +
476 ", location=" + catalogTracker.getMetaLocation());
477 status.setStatus("META and ROOT assigned.");
478 return assigned;
479 }
480
481 void fixupDaughters(final MonitoredTask status) throws IOException {
482 final Map<HRegionInfo, Result> offlineSplitParents =
483 new HashMap<HRegionInfo, Result>();
484 // This visitor collects offline split parents in the .META. table
485 MetaReader.Visitor visitor = new MetaReader.Visitor() {
486 @Override
487 public boolean visit(Result r) throws IOException {
488 if (r == null || r.isEmpty()) return true;
489 HRegionInfo info = CatalogJanitor.getHRegionInfo(r);
490 if (info == null) return true; // Keep scanning
491 if (info.isOffline() && info.isSplit()) {
492 offlineSplitParents.put(info, r);
493 }
494 // Returning true means "keep scanning"
495 return true;
496 }
497 };
498 // Run full scan of .META. catalog table passing in our custom visitor
499 MetaReader.fullScan(this.catalogTracker, visitor);
500 // Now work on our list of found parents. See if any we can clean up.
501 int fixups = 0;
502 for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
503 fixups += ServerShutdownHandler.fixupDaughters(
504 e.getValue(), assignmentManager, catalogTracker);
505 }
506 if (fixups != 0) {
507 LOG.info("Scanned the catalog and fixed up " + fixups +
508 " missing daughter region(s)");
509 }
510 }
511
512 /*
513 * @return This masters' address.
514 * @throws UnknownHostException
515 */
516 private static String getMyAddress(final Configuration c)
517 throws UnknownHostException {
518 // Find out our address up in DNS.
519 String s = DNS.getDefaultIP(c.get("hbase.master.dns.interface","default"));
520 s += ":" + c.get(HConstants.MASTER_PORT,
521 Integer.toString(HConstants.DEFAULT_MASTER_PORT));
522 return s;
523 }
524
525 /** @return HServerAddress of the master server */
526 public HServerAddress getMasterAddress() {
527 return this.address;
528 }
529
530 public long getProtocolVersion(String protocol, long clientVersion) {
531 return HBaseRPCProtocolVersion.versionID;
532 }
533
534 /** @return InfoServer object. Maybe null.*/
535 public InfoServer getInfoServer() {
536 return this.infoServer;
537 }
538
539 @Override
540 public Configuration getConfiguration() {
541 return this.conf;
542 }
543
544 @Override
545 public ServerManager getServerManager() {
546 return this.serverManager;
547 }
548
549 @Override
550 public ExecutorService getExecutorService() {
551 return this.executorService;
552 }
553
554 @Override
555 public MasterFileSystem getMasterFileSystem() {
556 return this.fileSystemManager;
557 }
558
559 /**
560 * Get the ZK wrapper object - needed by master_jsp.java
561 * @return the zookeeper wrapper
562 */
563 public ZooKeeperWatcher getZooKeeperWatcher() {
564 return this.zooKeeper;
565 }
566
567 /*
568 * Start up all services. If any of these threads gets an unhandled exception
569 * then they just die with a logged message. This should be fine because
570 * in general, we do not expect the master to get such unhandled exceptions
571 * as OOMEs; it should be lightly loaded. See what HRegionServer does if
572 * need to install an unexpected exception handler.
573 */
574 private void startServiceThreads() throws IOException{
575
576 // Start the executor service pools
577 this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
578 conf.getInt("hbase.master.executor.openregion.threads", 5));
579 this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
580 conf.getInt("hbase.master.executor.closeregion.threads", 5));
581 this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
582 conf.getInt("hbase.master.executor.serverops.threads", 3));
583 this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
584 conf.getInt("hbase.master.executor.serverops.threads", 5));
585
586 // We depend on there being only one instance of this executor running
587 // at a time. To do concurrency, would need fencing of enable/disable of
588 // tables.
589 this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
590
591 // Start log cleaner thread
592 String n = Thread.currentThread().getName();
593 this.logCleaner =
594 new LogCleaner(conf.getInt("hbase.master.cleaner.interval", 60 * 1000),
595 this, conf, getMasterFileSystem().getFileSystem(),
596 getMasterFileSystem().getOldLogDir());
597 Threads.setDaemonThreadRunning(logCleaner, n + ".oldLogCleaner");
598
599 // Put up info server.
600 int port = this.conf.getInt("hbase.master.info.port", 60010);
601 if (port >= 0) {
602 String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
603 this.infoServer = new InfoServer(MASTER, a, port, false);
604 this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
605 this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
606 this.infoServer.setAttribute(MASTER, this);
607 this.infoServer.start();
608 }
609
610 // Start allowing requests to happen.
611 this.rpcServer.openServer();
612 if (LOG.isDebugEnabled()) {
613 LOG.debug("Started service threads");
614 }
615
616 }
617
618 private void stopServiceThreads() {
619 if (LOG.isDebugEnabled()) {
620 LOG.debug("Stopping service threads");
621 }
622 if (this.rpcServer != null) this.rpcServer.stop();
623 // Clean up and close up shop
624 if (this.logCleaner!= null) this.logCleaner.interrupt();
625 if (this.infoServer != null) {
626 LOG.info("Stopping infoServer");
627 try {
628 this.infoServer.stop();
629 } catch (Exception ex) {
630 ex.printStackTrace();
631 }
632 }
633 if (this.executorService != null) this.executorService.shutdown();
634 }
635
636 private static Thread getAndStartBalancerChore(final HMaster master) {
637 String name = master.getServerName() + "-BalancerChore";
638 int period = master.getConfiguration().getInt("hbase.balancer.period", 300000);
639 // Start up the load balancer chore
640 Chore chore = new Chore(name, period, master) {
641 @Override
642 protected void chore() {
643 master.balance();
644 }
645 };
646 return Threads.setDaemonThreadRunning(chore);
647 }
648
649 private void stopChores() {
650 if (this.balancerChore != null) {
651 this.balancerChore.interrupt();
652 }
653 if (this.catalogJanitorChore != null) {
654 this.catalogJanitorChore.interrupt();
655 }
656 }
657
658 @Override
659 public MapWritable regionServerStartup(final HServerInfo serverInfo,
660 final long serverCurrentTime)
661 throws IOException {
662 // Set the ip into the passed in serverInfo. Its ip is more than likely
663 // not the ip that the master sees here. See at end of this method where
664 // we pass it back to the regionserver by setting "hbase.regionserver.address"
665 // Everafter, the HSI combination 'server name' is what uniquely identifies
666 // the incoming RegionServer.
667 InetSocketAddress address = new InetSocketAddress(
668 HBaseServer.getRemoteIp().getHostName(),
669 serverInfo.getServerAddress().getPort());
670 serverInfo.setServerAddress(new HServerAddress(address));
671
672 // Register with server manager
673 this.serverManager.regionServerStartup(serverInfo, serverCurrentTime);
674 // Send back some config info
675 MapWritable mw = createConfigurationSubset();
676 mw.put(new Text("hbase.regionserver.address"),
677 serverInfo.getServerAddress());
678 return mw;
679 }
680
681 /**
682 * @return Subset of configuration to pass initializing regionservers: e.g.
683 * the filesystem to use and root directory to use.
684 */
685 protected MapWritable createConfigurationSubset() {
686 MapWritable mw = addConfig(new MapWritable(), HConstants.HBASE_DIR);
687 return addConfig(mw, "fs.default.name");
688 }
689
690 private MapWritable addConfig(final MapWritable mw, final String key) {
691 mw.put(new Text(key), new Text(this.conf.get(key)));
692 return mw;
693 }
694
695 @Override
696 public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[],
697 HRegionInfo[] mostLoadedRegions)
698 throws IOException {
699 return adornRegionServerAnswer(serverInfo,
700 this.serverManager.regionServerReport(serverInfo, msgs, mostLoadedRegions));
701 }
702
703 @Override
704 public void reportRSFatalError(HServerInfo serverInfo,
705 String errorText) {
706 String msg = "Region server " + serverInfo + " reported a fatal error:\n"
707 + errorText;
708 LOG.error(msg);
709 rsFatals.add(msg);
710 }
711
712 /**
713 * Override if you'd add messages to return to regionserver <code>hsi</code>
714 * or to send an exception.
715 * @param msgs Messages to add to
716 * @return Messages to return to
717 * @throws IOException exceptions that were injected for the region servers
718 */
719 protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi,
720 final HMsg [] msgs) throws IOException {
721 return msgs;
722 }
723
724 public boolean isMasterRunning() {
725 return !isStopped();
726 }
727
728 @Override
729 public boolean balance() {
730 // If balance not true, don't run balancer.
731 if (!this.balanceSwitch) return false;
732 synchronized (this.balancer) {
733 // Only allow one balance run at at time.
734 if (this.assignmentManager.isRegionsInTransition()) {
735 LOG.debug("Not running balancer because " +
736 this.assignmentManager.getRegionsInTransition().size() +
737 " region(s) in transition: " +
738 org.apache.commons.lang.StringUtils.
739 abbreviate(this.assignmentManager.getRegionsInTransition().toString(), 256));
740 return false;
741 }
742 if (this.serverManager.areDeadServersInProgress()) {
743 LOG.debug("Not running balancer because processing dead regionserver(s): " +
744 this.serverManager.getDeadServers());
745 return false;
746 }
747 Map<HServerInfo, List<HRegionInfo>> assignments =
748 this.assignmentManager.getAssignments();
749 // Returned Map from AM does not include mention of servers w/o assignments.
750 for (Map.Entry<String, HServerInfo> e:
751 this.serverManager.getOnlineServers().entrySet()) {
752 HServerInfo hsi = e.getValue();
753 if (!assignments.containsKey(hsi)) {
754 assignments.put(hsi, new ArrayList<HRegionInfo>());
755 }
756 }
757 List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
758 if (plans != null && !plans.isEmpty()) {
759 for (RegionPlan plan: plans) {
760 LOG.info("balance " + plan);
761 this.assignmentManager.balance(plan);
762 }
763 }
764 }
765 return true;
766 }
767
768 @Override
769 public boolean balanceSwitch(final boolean b) {
770 boolean oldValue = this.balanceSwitch;
771 this.balanceSwitch = b;
772 LOG.info("Balance=" + b);
773 return oldValue;
774 }
775
776 /**
777 * Switch for the background {@link CatalogJanitor} thread.
778 * Used for testing. The thread will continue to run. It will just be a noop
779 * if disabled.
780 * @param b If false, the catalog janitor won't do anything.
781 */
782 public void setCatalogJanitorEnabled(final boolean b) {
783 ((CatalogJanitor)this.catalogJanitorChore).setEnabled(b);
784 }
785
// Moves the region with the given encoded name to destServerName. With a
// null/empty destination the region is simply unassigned; the assignment
// manager then picks a random server on reassign.
786 @Override
787 public void move(final byte[] encodedRegionName, final byte[] destServerName)
788 throws UnknownRegionException {
789 Pair<HRegionInfo, HServerInfo> p =
790 this.assignmentManager.getAssignment(encodedRegionName);
791 if (p == null)
792 throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
793 HRegionInfo hri = p.getFirst();
794 HServerInfo dest = null;
795 if (destServerName == null || destServerName.length == 0) {
796 LOG.info("Passed destination servername is null/empty so " +
797 "choosing a server at random");
798 this.assignmentManager.clearRegionPlan(hri);
799 // Unassign will reassign it elsewhere choosing random server.
800 this.assignmentManager.unassign(hri);
801 } else {
// NOTE(review): getServerInfo may return null for an unknown server name,
// leaving the RegionPlan with a null destination — confirm upstream behavior.
802 dest = this.serverManager.getServerInfo(new String(destServerName));
803 RegionPlan rp = new RegionPlan(p.getFirst(), p.getSecond(), dest);
804 this.assignmentManager.balance(rp);
805 }
806 }
807
// Convenience overload: asynchronous (sync == false) table creation.
808 public void createTable(HTableDescriptor desc, byte [][] splitKeys)
809 throws IOException {
810 createTable(desc, splitKeys, false);
811 }
812
// Builds the HRegionInfo array for the new table (one region when no split
// keys are given, otherwise splitKeys.length + 1 contiguous regions), waits
// for .META. availability, then delegates to the private createTable.
813 public void createTable(HTableDescriptor desc, byte [][] splitKeys,
814 boolean sync)
815 throws IOException {
816 if (!isMasterRunning()) {
817 throw new MasterNotRunningException();
818 }
819 HRegionInfo [] newRegions = null;
820 if(splitKeys == null || splitKeys.length == 0) {
// Single region spanning the whole key space (null start and end keys).
821 newRegions = new HRegionInfo [] { new HRegionInfo(desc, null, null) };
822 } else {
823 int numRegions = splitKeys.length + 1;
824 newRegions = new HRegionInfo[numRegions];
825 byte [] startKey = null;
826 byte [] endKey = null;
// Region i spans [previous split key, splitKeys[i]); the last region is
// open-ended (null end key).
827 for(int i=0;i<numRegions;i++) {
828 endKey = (i == splitKeys.length) ? null : splitKeys[i];
829 newRegions[i] = new HRegionInfo(desc, startKey, endKey);
830 startKey = endKey;
831 }
832 }
833 int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
834 // Need META availability to create a table
835 try {
836 if(catalogTracker.waitForMeta(timeout) == null) {
837 throw new NotAllMetaRegionsOnlineException();
838 }
839 } catch (InterruptedException e) {
840 LOG.warn("Interrupted waiting for meta availability", e);
841 throw new IOException(e);
842 }
843 createTable(newRegions, sync);
844 }
845
// Creates the new regions on disk, inserts them into .META. in batches, then
// triggers assignment. Synchronized so only one table creation runs at a time.
846 private synchronized void createTable(final HRegionInfo [] newRegions,
847 final boolean sync)
848 throws IOException {
849 String tableName = newRegions[0].getTableDesc().getNameAsString();
850 if(MetaReader.tableExists(catalogTracker, tableName)) {
851 throw new TableExistsException(tableName);
852 }
853 // 1. Set table enabling flag up in zk.
854 try {
855 assignmentManager.getZKTable().setEnabledTable(tableName);
856 } catch (KeeperException e) {
857 throw new IOException("Unable to ensure that the table will be" +
858 " enabled because of a ZooKeeper issue", e);
859 }
860
861 List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
862 final int batchSize = this.conf.getInt("hbase.master.createtable.batchsize", 100);
// One shared HLog (taken from the first region created) is reused for all
// regions and deleted after the loop.
863 HLog hlog = null;
864 for (int regionIdx = 0; regionIdx < newRegions.length; regionIdx++) {
865 HRegionInfo newRegion = newRegions[regionIdx];
866
867 // 2. Create HRegion
868 HRegion region = HRegion.createHRegion(newRegion,
869 fileSystemManager.getRootDir(), conf, hlog);
870
871 if (hlog == null) {
872 hlog = region.getLog();
873 }
874
875 regionInfos.add(region.getRegionInfo());
// NOTE(review): this condition is also true at regionIdx == 0, so the first
// "batch" flushed to META contains a single region — harmless but presumably
// unintended.
876 if (regionIdx % batchSize == 0) {
877 // 3. Insert into META
878 MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
879 regionInfos.clear();
880 }
881
882 // 4. Close the new region to flush to disk. Close log file too.
883 region.close();
884 }
885 hlog.closeAndDelete();
// Flush any remaining regions that did not fill a whole batch.
886 if (regionInfos.size() > 0) {
887 MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
888 }
889
890 // 5. Trigger immediate assignment of the regions in round-robin fashion
891 if (newRegions.length == 1) {
892 this.assignmentManager.assign(newRegions[0], true);
893 } else {
894 List<HServerInfo> servers = serverManager.getOnlineServersList();
895 this.assignmentManager.bulkAssignUserRegions(newRegions, servers, sync);
896 }
897
898 // 6. If sync, wait for assignment of regions
899 if (sync) {
900 LOG.debug("Waiting for " + newRegions.length + " region(s) to be assigned");
901 for (HRegionInfo regionInfo : newRegions) {
902 try {
903 this.assignmentManager.waitForAssignment(regionInfo);
904 } catch (InterruptedException e) {
905 LOG.info("Interrupted waiting for region to be assigned during " +
906 "create table call", e);
// Restore the interrupt flag for callers further up the stack.
907 Thread.currentThread().interrupt();
908 return;
909 }
910 }
911 }
912 }
913
// True when tableName is one of the two catalog tables (-ROOT- or .META.).
914 private static boolean isCatalogTable(final byte [] tableName) {
915 return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) ||
916 Bytes.equals(tableName, HConstants.META_TABLE_NAME);
917 }
918
// Table/column DDL entry points. delete/enable/disable run asynchronously on
// the master executor service; the column-family operations run inline on the
// calling thread (process() is invoked directly).
919 public void deleteTable(final byte [] tableName) throws IOException {
920 this.executorService.submit(new DeleteTableHandler(tableName, this, this));
921 }
922
923 public void addColumn(byte [] tableName, HColumnDescriptor column)
924 throws IOException {
925 new TableAddFamilyHandler(tableName, column, this, this).process();
926 }
927
928 public void modifyColumn(byte [] tableName, HColumnDescriptor descriptor)
929 throws IOException {
930 new TableModifyFamilyHandler(tableName, descriptor, this, this).process();
931 }
932
933 public void deleteColumn(final byte [] tableName, final byte [] c)
934 throws IOException {
935 new TableDeleteFamilyHandler(tableName, c, this, this).process();
936 }
937
938 public void enableTable(final byte [] tableName) throws IOException {
939 this.executorService.submit(new EnableTableHandler(this, tableName,
940 catalogTracker, assignmentManager));
941 }
942
943 public void disableTable(final byte [] tableName) throws IOException {
944 this.executorService.submit(new DisableTableHandler(this, tableName,
945 catalogTracker, assignmentManager));
946 }
947
948 /**
949 * Return the region and current deployment for the region containing
950 * the given row. If the region cannot be found, returns null. If it
951 * is found, but not currently deployed, the second element of the pair
952 * may be null.
953 */
954 Pair<HRegionInfo,HServerAddress> getTableRegionForRow(
955 final byte [] tableName, final byte [] rowKey)
956 throws IOException {
// AtomicReference lets the anonymous visitor publish its single result back
// to this method's scope.
957 final AtomicReference<Pair<HRegionInfo, HServerAddress>> result =
958 new AtomicReference<Pair<HRegionInfo, HServerAddress>>(null);
959
960 MetaScannerVisitor visitor =
961 new MetaScannerVisitor() {
962 @Override
963 public boolean processRow(Result data) throws IOException {
964 if (data == null || data.size() <= 0) {
// Skip empty rows but keep scanning.
965 return true;
966 }
967 Pair<HRegionInfo, HServerAddress> pair =
968 MetaReader.metaRowToRegionPair(data);
969 if (pair == null) {
970 return false;
971 }
// Stop once the scan leaves the requested table.
972 if (!Bytes.equals(pair.getFirst().getTableDesc().getName(),
973 tableName)) {
974 return false;
975 }
976 result.set(pair);
977 return true;
978 }
979 };
980
// Row limit of 1: only the region containing rowKey is needed.
981 MetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
982 return result.get();
983 }
984
// Asynchronously applies a new table descriptor via the executor service.
985 @Override
986 public void modifyTable(final byte[] tableName, HTableDescriptor htd)
987 throws IOException {
988 this.executorService.submit(new ModifyTableHandler(tableName, htd, this, this));
989 }
990
// Pre-flight check for table-altering operations: rejects catalog tables,
// tables missing from META, and tables that are not currently disabled.
991 @Override
992 public void checkTableModifiable(final byte [] tableName)
993 throws IOException {
994 String tableNameStr = Bytes.toString(tableName);
995 if (isCatalogTable(tableName)) {
996 throw new IOException("Can't modify catalog tables");
997 }
998 if (!MetaReader.tableExists(getCatalogTracker(), tableNameStr)) {
999 throw new TableNotFoundException(tableNameStr);
1000 }
1001 if (!getAssignmentManager().getZKTable().
1002 isDisabledTable(Bytes.toString(tableName))) {
1003 throw new TableNotDisabledException(tableName);
1004 }
1005 }
1006
// Removes hri from the regions-in-transition state if it is currently there.
1007 public void clearFromTransition(HRegionInfo hri) {
1008 if (this.assignmentManager.isRegionInTransition(hri) != null) {
1009 this.assignmentManager.clearRegionFromTransition(hri);
1010 }
1011 }
1012 /**
1013 * @return cluster status
1014 */
1015 public ClusterStatus getClusterStatus() {
// Snapshot assembled from version info, server-manager state and
// regions-in-transition at the moment of the call.
1016 ClusterStatus status = new ClusterStatus();
1017 status.setHBaseVersion(VersionInfo.getVersion());
1018 status.setServerInfo(serverManager.getOnlineServers().values());
1019 status.setDeadServers(serverManager.getDeadServers());
1020 status.setRegionsInTransition(assignmentManager.getRegionsInTransition());
1021 return status;
1022 }
1023
// Logs the fatal cause (if any), marks the master aborted and initiates stop().
1024 @Override
1025 public void abort(final String msg, final Throwable t) {
1026 if (t != null) LOG.fatal(msg, t);
1027 else LOG.fatal(msg);
1028 this.abort = true;
1029 stop("Aborting");
1030 }
1031
// Simple accessors exposing master-internal services.
1032 @Override
1033 public ZooKeeperWatcher getZooKeeper() {
1034 return zooKeeper;
1035 }
1036
1037 @Override
1038 public String getServerName() {
// The master's server name is its address rendered as a string.
1039 return address.toString();
1040 }
1041
1042 @Override
1043 public CatalogTracker getCatalogTracker() {
1044 return catalogTracker;
1045 }
1046
1047 @Override
1048 public AssignmentManager getAssignmentManager() {
1049 return this.assignmentManager;
1050 }
1051
// Memory-bounded buffer of fatal errors reported by region servers.
1052 public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
1053 return rsFatals;
1054 }
1055
// Cluster shutdown: tells the server manager to shut the cluster down and
// records the cluster-down state in ZooKeeper (best effort — a ZK failure is
// only logged).
1056 @Override
1057 public void shutdown() {
1058 this.serverManager.shutdownCluster();
1059 try {
1060 this.clusterStatusTracker.setClusterDown();
1061 } catch (KeeperException e) {
1062 LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
1063 }
1064 }
1065
// Stops only this master process, recording which thread requested it.
1066 @Override
1067 public void stopMaster() {
1068 stop("Stopped by " + Thread.currentThread().getName());
1069 }
1070
1071 @Override
1072 public void stop(final String why) {
1073 LOG.info(why);
1074 this.stopped = true;
1075 // If we are a backup master, we need to interrupt wait
1076 synchronized (this.activeMasterManager.clusterHasActiveMaster) {
1077 this.activeMasterManager.clusterHasActiveMaster.notifyAll();
1078 }
1079 }
1080
1081 @Override
1082 public boolean isStopped() {
1083 return this.stopped;
1084 }
1085
1086 /**
1087 * Report whether this master is currently the active master or not.
1088 * If not active master, we are parked on ZK waiting to become active.
1089 *
1090 * This method is used for testing.
1091 *
1092 * @return true if active master, false if not.
1093 */
1094 public boolean isActiveMaster() {
// Plain flag read; the flag is set elsewhere during master startup.
1095 return isActiveMaster;
1096 }
1097
1098 /**
1099 * Report whether this master has completed with its initialization and is
1100 * ready. If ready, the master is also the active master. A standby master
1101 * is never ready.
1102 *
1103 * This method is used for testing.
1104 *
1105 * @return true if master is ready to go, false if not.
1106 */
1107 public boolean isInitialized() {
1108 return initialized;
1109 }
1110
// Admin-triggered assignment of the region named regionName.
// NOTE(review): the 'force' parameter is accepted but never consulted here —
// every call delegates to assignRegion unconditionally.
1111 @Override
1112 public void assign(final byte [] regionName, final boolean force)
1113 throws IOException {
1114 Pair<HRegionInfo, HServerAddress> pair =
1115 MetaReader.getRegion(this.catalogTracker, regionName);
1116 if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
1117 assignRegion(pair.getFirst());
1118 }
1119
// Assigns hri; second argument 'true' — see AssignmentManager.assign for its
// exact meaning (not visible from this file).
1120 public void assignRegion(HRegionInfo hri) {
1121 assignmentManager.assign(hri, true);
1122 }
1123
// Admin-triggered unassign. With force == true the region is cleared from the
// in-transition state and immediately reassigned; otherwise a normal unassign
// is requested from the assignment manager.
1124 @Override
1125 public void unassign(final byte [] regionName, final boolean force)
1126 throws IOException {
1127 Pair<HRegionInfo, HServerAddress> pair =
1128 MetaReader.getRegion(this.catalogTracker, regionName);
1129 if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
1130 HRegionInfo hri = pair.getFirst();
1131 if (force) {
1132 this.assignmentManager.clearRegionFromTransition(hri);
1133 assignRegion(hri);
1134 } else {
1135 this.assignmentManager.unassign(hri, force);
1136 }
1137 }
1138
1139 /**
1140 * Utility for constructing an instance of the passed HMaster class.
1141 * @param masterClass
1142 * @param conf
1143 * @return HMaster instance.
1144 */
1145 public static HMaster constructMaster(Class<? extends HMaster> masterClass,
1146 final Configuration conf) {
1147 try {
// Reflectively invoke the (Configuration) constructor of the given subclass.
1148 Constructor<? extends HMaster> c =
1149 masterClass.getConstructor(Configuration.class);
1150 return c.newInstance(conf);
1151 } catch (InvocationTargetException ite) {
// Unwrap the reflective wrapper (and one level of cause) so the real
// constructor failure is what gets reported.
1152 Throwable target = ite.getTargetException() != null?
1153 ite.getTargetException(): ite;
1154 if (target.getCause() != null) target = target.getCause();
1155 throw new RuntimeException("Failed construction of Master: " +
1156 masterClass.toString(), target);
1157 } catch (Exception e) {
1158 throw new RuntimeException("Failed construction of Master: " +
1159 masterClass.toString() + ((e.getCause() != null)?
1160 e.getCause().getMessage(): ""), e);
1161 }
1162 }
1163
1164
1165 /**
1166 * @see org.apache.hadoop.hbase.master.HMasterCommandLine
1167 */
1168 public static void main(String [] args) throws Exception {
// Argument parsing and startup are delegated to HMasterCommandLine.
1169 new HMasterCommandLine(HMaster.class).doMain(args);
1170 }
1171}
01172
=== added file 'AppDB/hbase/patch/HRegionServer.java'
--- AppDB/hbase/patch/HRegionServer.java 1970-01-01 00:00:00 +0000
+++ AppDB/hbase/patch/HRegionServer.java 2012-02-16 06:02:21 +0000
@@ -0,0 +1,2828 @@
1/**
2 * Copyright 2010 The Apache Software Foundation
3 *
4 * Licensed to the Apache Software Foundation (ASF) under one
5 * or more contributor license agreements. See the NOTICE file
6 * distributed with this work for additional information
7 * regarding copyright ownership. The ASF licenses this file
8 * to you under the Apache License, Version 2.0 (the
9 * "License"); you may not use this file except in compliance
10 * with the License. You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20package org.apache.hadoop.hbase.regionserver;
21
22import java.io.IOException;
23import java.lang.Thread.UncaughtExceptionHandler;
24import java.lang.annotation.Retention;
25import java.lang.annotation.RetentionPolicy;
26import java.lang.management.ManagementFactory;
27import java.lang.management.MemoryUsage;
28import java.lang.reflect.Constructor;
29import java.lang.reflect.Method;
30import java.net.BindException;
31import java.net.InetSocketAddress;
32import java.util.ArrayList;
33import java.util.Collection;
34import java.util.Collections;
35import java.util.Comparator;
36import java.util.HashMap;
37import java.util.HashSet;
38import java.util.Iterator;
39import java.util.LinkedList;
40import java.util.List;
41import java.util.Map;
42import java.util.Random;
43import java.util.Set;
44import java.util.SortedMap;
45import java.util.TreeMap;
46import java.util.concurrent.ConcurrentHashMap;
47import java.util.concurrent.ConcurrentSkipListSet;
48import java.util.concurrent.LinkedBlockingQueue;
49import java.util.concurrent.TimeUnit;
50import java.util.concurrent.atomic.AtomicBoolean;
51import java.util.concurrent.atomic.AtomicInteger;
52import java.util.concurrent.locks.ReentrantReadWriteLock;
53
54import org.apache.commons.logging.Log;
55import org.apache.commons.logging.LogFactory;
56import org.apache.hadoop.conf.Configuration;
57import org.apache.hadoop.fs.FileSystem;
58import org.apache.hadoop.fs.Path;
59import org.apache.hadoop.hbase.Chore;
60import org.apache.hadoop.hbase.ClockOutOfSyncException;
61import org.apache.hadoop.hbase.DoNotRetryIOException;
62import org.apache.hadoop.hbase.HBaseConfiguration;
63import org.apache.hadoop.hbase.HConstants;
64import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
65import org.apache.hadoop.hbase.HMsg;
66import org.apache.hadoop.hbase.HRegionInfo;
67import org.apache.hadoop.hbase.HServerAddress;
68import org.apache.hadoop.hbase.HServerInfo;
69import org.apache.hadoop.hbase.HServerLoad;
70import org.apache.hadoop.hbase.KeyValue;
71import org.apache.hadoop.hbase.MasterAddressTracker;
72import org.apache.hadoop.hbase.NotServingRegionException;
73import org.apache.hadoop.hbase.RemoteExceptionHandler;
74import org.apache.hadoop.hbase.Server;
75import org.apache.hadoop.hbase.Stoppable;
76import org.apache.hadoop.hbase.UnknownRowLockException;
77import org.apache.hadoop.hbase.UnknownScannerException;
78import org.apache.hadoop.hbase.YouAreDeadException;
79import org.apache.hadoop.hbase.catalog.CatalogTracker;
80import org.apache.hadoop.hbase.catalog.MetaEditor;
81import org.apache.hadoop.hbase.catalog.RootLocationEditor;
82import org.apache.hadoop.hbase.client.Action;
83import org.apache.hadoop.hbase.client.Delete;
84import org.apache.hadoop.hbase.client.Get;
85import org.apache.hadoop.hbase.client.Increment;
86import org.apache.hadoop.hbase.client.MultiAction;
87import org.apache.hadoop.hbase.client.MultiPut;
88import org.apache.hadoop.hbase.client.MultiPutResponse;
89import org.apache.hadoop.hbase.client.MultiResponse;
90import org.apache.hadoop.hbase.client.Put;
91import org.apache.hadoop.hbase.client.Result;
92import org.apache.hadoop.hbase.client.Row;
93import org.apache.hadoop.hbase.client.Scan;
94import org.apache.hadoop.hbase.executor.ExecutorService;
95import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
96import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
97import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
98import org.apache.hadoop.hbase.ipc.HBaseRPC;
99import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
100import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
101import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics;
102import org.apache.hadoop.hbase.ipc.HBaseServer;
103import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
104import org.apache.hadoop.hbase.ipc.HRegionInterface;
105import org.apache.hadoop.hbase.ipc.ServerNotRunningException;
106import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
107import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
108import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
109import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
110import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
111import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
112import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
113import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
114import org.apache.hadoop.hbase.regionserver.wal.HLog;
115import org.apache.hadoop.hbase.regionserver.wal.WALObserver;
116import org.apache.hadoop.hbase.replication.regionserver.Replication;
117import org.apache.hadoop.hbase.security.User;
118import org.apache.hadoop.hbase.util.Bytes;
119import org.apache.hadoop.hbase.util.CompressionTest;
120import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
121import org.apache.hadoop.hbase.util.FSUtils;
122import org.apache.hadoop.hbase.util.InfoServer;
123import org.apache.hadoop.hbase.util.Pair;
124import org.apache.hadoop.hbase.util.Sleeper;
125import org.apache.hadoop.hbase.util.Strings;
126import org.apache.hadoop.hbase.util.Threads;
127import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
128import org.apache.hadoop.hbase.zookeeper.ZKUtil;
129import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
130import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
131import org.apache.hadoop.io.MapWritable;
132import org.apache.hadoop.io.Writable;
133import org.apache.hadoop.ipc.RemoteException;
134import org.apache.hadoop.net.DNS;
135import org.apache.hadoop.util.StringUtils;
136import org.apache.zookeeper.KeeperException;
137
138import com.google.common.base.Function;
139import com.google.common.collect.Lists;
140
141/**
142 * HRegionServer makes a set of HRegions available to clients. It checks in with
143 * the HMaster. There are many HRegionServers in a single HBase deployment.
144 */
145public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
146 Runnable, RegionServerServices, Server {
147 public static final Log LOG = LogFactory.getLog(HRegionServer.class);
148
149 // Set when a report to the master comes back with a message asking us to
150 // shutdown. Also set by call to stop when debugging or running unit tests
151 // of HRegionServer in isolation.
152 protected volatile boolean stopped = false;
153
154 // A state before we go into stopped state. At this stage we're closing user
155 // space regions.
156 private boolean stopping = false;
157
158 // Go down hard. Used if file system becomes unavailable and also in
159 // debugging and unit tests.
160 protected volatile boolean abortRequested;
161
162 private volatile boolean killed = false;
163
164 // If false, the file system has become unavailable
165 protected volatile boolean fsOk;
166
167 protected HServerInfo serverInfo;
168 protected final Configuration conf;
169
170 protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
171 private FileSystem fs;
172 private Path rootDir;
173 private final Random rand = new Random();
174
175 /**
176 * Map of regions currently being served by this region server. Key is the
177 * encoded region name. All access should be synchronized.
178 */
179 protected final Map<String, HRegion> onlineRegions =
180 new ConcurrentHashMap<String, HRegion>();
181
182 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
183 private final LinkedBlockingQueue<HMsg> outboundMsgs = new LinkedBlockingQueue<HMsg>();
184
// Configuration-derived parameters, set once in the constructor.
185 final int numRetries;
186 protected final int threadWakeFrequency;
187 private final int msgInterval;
188
189 protected final int numRegionsToReport;
190
191 private final long maxScannerResultSize;
192
193 // Remote HMaster
194 private HMasterRegionInterface hbaseMaster;
195
196 // Server to handle client requests. Default access so can be accessed by
197 // unit tests.
198 HBaseServer server;
199
200 // Leases
201 private Leases leases;
202
// Request counter
// NOTE(review): 'volatile' on the field plus AtomicInteger is redundant
// unless the reference itself is ever swapped.
204 private volatile AtomicInteger requestCount = new AtomicInteger();
205
206 // Info server. Default access so can be used by unit tests. REGIONSERVER
207 // is name of the webapp and the attribute name used stuffing this instance
208 // into web context.
209 InfoServer infoServer;
210
211 /** region server process name */
212 public static final String REGIONSERVER = "regionserver";
213
214 /*
215 * Space is reserved in HRS constructor and then released when aborting to
216 * recover from an OOME. See HBASE-706. TODO: Make this percentage of the heap
217 * or a minimum.
218 */
219 private final LinkedList<byte[]> reservedSpace = new LinkedList<byte[]>();
220
221 private RegionServerMetrics metrics;
222
223 // Compactions
224 CompactSplitThread compactSplitThread;
225
226 // Cache flushing
227 MemStoreFlusher cacheFlusher;
228
229 /*
230 * Check for major compactions.
231 */
232 Chore majorCompactionChecker;
233
234 // HLog and HLog roller. log is protected rather than private to avoid
235 // eclipse warning when accessed by inner classes
236 protected volatile HLog hlog;
237 LogRoller hlogRoller;
238
239 // flag set after we're done setting up server threads (used for testing)
240 protected volatile boolean isOnline;
241
// Open scanners, keyed by scanner id rendered as a decimal string (see
// QosFunction.apply, which looks scanners up via Long.toString(scannerId)).
242 final Map<String, InternalScanner> scanners = new ConcurrentHashMap<String, InternalScanner>();
243
244 // zookeeper connection and watcher
245 private ZooKeeperWatcher zooKeeper;
246
247 // master address manager and watcher
248 private MasterAddressTracker masterAddressManager;
249
250 // catalog tracker
251 private CatalogTracker catalogTracker;
252
253 // Cluster Status Tracker
254 private ClusterStatusTracker clusterStatusTracker;
255
256 // Log Splitting Worker
257 private SplitLogWorker splitLogWorker;
258
259 // A sleeper that sleeps for msgInterval.
260 private final Sleeper sleeper;
261
262 private final int rpcTimeout;
263
264 // The main region server thread.
265 @SuppressWarnings("unused")
266 private Thread regionServerThread;
267
268 // Instance of the hbase executor service.
269 private ExecutorService service;
270
271 // Replication services. If no replication, this handler will be null.
272 private Replication replicationHandler;
273
// Regions currently in transition on this region server, keyed by region
// name bytes (sorted by the standard byte comparator).
274 private final Set<byte[]> regionsInTransitionInRS =
275 new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
276
277 /**
278 * Starts a HRegionServer at the default location
279 *
280 * @param conf
281 * @throws IOException
282 * @throws InterruptedException
283 */
284 public HRegionServer(Configuration conf) throws IOException, InterruptedException {
285 this.fsOk = true;
286 this.conf = conf;
287 this.isOnline = false;
288
289 // check to see if the codec list is available:
290 String [] codecs = conf.getStrings("hbase.regionserver.codecs",
291 (String[])null);
292 if (codecs != null) {
// Fail fast if any configured compression codec is unusable on this host.
293 for (String codec : codecs) {
294 if (!CompressionTest.testCompression(codec)) {
295 throw new IOException("Compression codec " + codec +
296 " not supported, aborting RS construction");
297 }
298 }
299 }
300
301 // Config'ed params
302 this.numRetries = conf.getInt("hbase.client.retries.number", 10);
303 this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
304 10 * 1000);
305 this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);
306
307 sleeper = new Sleeper(this.msgInterval, this);
308
309 this.maxScannerResultSize = conf.getLong(
310 HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
311 HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
312
313 this.numRegionsToReport = conf.getInt(
314 "hbase.regionserver.numregionstoreport", 10);
315
316 this.rpcTimeout = conf.getInt(
317 HConstants.HBASE_RPC_TIMEOUT_KEY,
318 HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
319
320 this.abortRequested = false;
321 this.stopped = false;
322
323 // Server to handle client requests
324 String machineName = DNS.getDefaultIP(
325 conf.get("hbase.regionserver.dns.interface","default"));
326
327 String addressStr = machineName + ":" +
328 conf.get(HConstants.REGIONSERVER_PORT,
329 Integer.toString(HConstants.DEFAULT_REGIONSERVER_PORT));
330 HServerAddress address = new HServerAddress(addressStr);
331 this.server = HBaseRPC.getServer(this,
332 new Class<?>[]{HRegionInterface.class, HBaseRPCErrorHandler.class,
333 OnlineRegions.class},
334 address.getBindAddress(),
335 address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
336 conf.getInt("hbase.regionserver.metahandler.count", 10),
337 false, conf, QOS_THRESHOLD);
338 this.server.setErrorHandler(this);
339 this.server.setQosFunction(new QosFunction());
340
341 // HServerInfo can be amended by master. See below in reportForDuty.
// The RPC server is created first so the actual listener port (possibly
// different from the requested one) can be recorded in serverInfo.
342 this.serverInfo = new HServerInfo(new HServerAddress(new InetSocketAddress(
343 address.getBindAddress(), this.server.getListenerAddress().getPort())),
344 System.currentTimeMillis(), this.conf.getInt(
345 "hbase.regionserver.info.port", 60030), machineName);
346 if (this.serverInfo.getServerAddress() == null) {
347 throw new NullPointerException("Server address cannot be null; "
348 + "hbase-958 debugging");
349 }
350
351 // login the server principal (if using secure Hadoop)
352 User.login(conf, "hbase.regionserver.keytab.file",
353 "hbase.regionserver.kerberos.principal", serverInfo.getHostname());
354 }
355
// RPC priority levels used by QosFunction below.
356 private static final int NORMAL_QOS = 0;
357 private static final int QOS_THRESHOLD = 10; // the line between low and high qos
358 private static final int HIGH_QOS = 100;
359
// Marker annotation read reflectively (at runtime) by QosFunction to assign a
// fixed priority to specific RPC handler methods.
360 @Retention(RetentionPolicy.RUNTIME)
361 private @interface QosPriority {
362 int priority() default 0;
363 }
364
// Maps an incoming RPC invocation to a QOS priority: annotated methods use
// their declared priority; otherwise requests touching catalog (META/ROOT)
// regions are bumped to HIGH_QOS so catalog traffic is never starved.
365 class QosFunction implements Function<Writable,Integer> {
366 private final Map<String, Integer> annotatedQos;
367
368 public QosFunction() {
// Build a method-name -> priority table once from @QosPriority annotations.
369 Map<String, Integer> qosMap = new HashMap<String, Integer>();
370 for (Method m : HRegionServer.class.getMethods()) {
371 QosPriority p = m.getAnnotation(QosPriority.class);
372 if (p != null) {
373 qosMap.put(m.getName(), p.priority());
374 }
375 }
376
377 annotatedQos = qosMap;
378 }
379
// True when regionName refers to a catalog region served by this RS; a
// region we are not serving counts as non-meta.
380 public boolean isMetaRegion(byte[] regionName) {
381 HRegion region;
382 try {
383 region = getRegion(regionName);
384 } catch (NotServingRegionException ignored) {
385 return false;
386 }
387 return region.getRegionInfo().isMetaRegion();
388 }
389
390 @Override
391 public Integer apply(Writable from) {
392 if (!(from instanceof HBaseRPC.Invocation)) return NORMAL_QOS;
393
394 HBaseRPC.Invocation inv = (HBaseRPC.Invocation) from;
395 String methodName = inv.getMethodName();
396
397 Integer priorityByAnnotation = annotatedQos.get(methodName);
398 if (priorityByAnnotation != null) {
399 return priorityByAnnotation;
400 }
401
402 // scanner methods...
403 if (methodName.equals("next") || methodName.equals("close")) {
404 // translate!
405 Long scannerId;
406 try {
407 scannerId = (Long) inv.getParameters()[0];
408 } catch (ClassCastException ignored) {
409 // LOG.debug("Low priority: " + from);
410 return NORMAL_QOS; // doh.
411 }
// Look the scanner up by its stringified id and check which region it scans.
412 String scannerIdString = Long.toString(scannerId);
413 InternalScanner scanner = scanners.get(scannerIdString);
414 if (scanner instanceof HRegion.RegionScanner) {
415 HRegion.RegionScanner rs = (HRegion.RegionScanner) scanner;
416 HRegionInfo regionName = rs.getRegionName();
417 if (regionName.isMetaRegion()) {
418 // LOG.debug("High priority scanner request: " + scannerId);
419 return HIGH_QOS;
420 }
421 }
422 } else if (inv.getParameterClasses().length == 0) {
423 // Just let it through. This is getOnlineRegions, etc.
424 } else if (inv.getParameterClasses()[0] == byte[].class) {
425 // first arg is byte array, so assume this is a regionname:
426 if (isMetaRegion((byte[]) inv.getParameters()[0])) {
427 // LOG.debug("High priority with method: " + methodName +
428 // " and region: "
429 // + Bytes.toString((byte[]) inv.getParameters()[0]));
430 return HIGH_QOS;
431 }
432 } else if (inv.getParameterClasses()[0] == MultiAction.class) {
433 MultiAction ma = (MultiAction) inv.getParameters()[0];
434 Set<byte[]> regions = ma.getRegions();
435 // ok this sucks, but if any single of the actions touches a meta, the
436 // whole
437 // thing gets pingged high priority. This is a dangerous hack because
438 // people
439 // can get their multi action tagged high QOS by tossing a Get(.META.)
440 // AND this
441 // regionserver hosts META/-ROOT-
442 for (byte[] region : regions) {
443 if (isMetaRegion(region)) {
444 // LOG.debug("High priority multi with region: " +
445 // Bytes.toString(region));
446 return HIGH_QOS; // short circuit for the win.
447 }
448 }
449 }
450 // LOG.debug("Low priority: " + from.toString());
451 return NORMAL_QOS;
452 }
453 }
454
455 /**
456 * Creates all of the state that needs to be reconstructed in case we are
457 * doing a restart. This is shared between the constructor and restart(). Both
458 * call it.
459 *
460 * @throws IOException
461 * @throws InterruptedException
462 */
// Note: despite the @throws tags above, all failures (any Throwable) are
// caught here and converted into an abort of the region server.
463 private void initialize() {
464 try {
465 initializeZooKeeper();
466 initializeThreads();
// Pre-allocate reservation blocks to be released on OOME (see the
// reservedSpace field comment / HBASE-706).
467 int nbBlocks = conf.getInt("hbase.regionserver.nbreservationblocks", 4);
468 for (int i = 0; i < nbBlocks; i++) {
469 reservedSpace.add(new byte[HConstants.DEFAULT_SIZE_RESERVATION_BLOCK]);
470 }
471 } catch (Throwable t) {
472 // Call stop if error or process will stick around for ever since server
473 // puts up non-daemon threads.
474 this.server.stop();
475 abort("Initialization of RS failed. Hence aborting RS.", t);
476 }
477 }
478
479 /**
480 * Bring up connection to zk ensemble and then wait until a master for this
481 * cluster and then after that, wait until cluster 'up' flag has been set.
482 * This is the order in which master does things.
483 * Finally put up a catalog tracker.
484 * @throws IOException
485 * @throws InterruptedException
486 */
487 private void initializeZooKeeper() throws IOException, InterruptedException {
488 // Open connection to zookeeper and set primary watcher
489 zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" +
490 serverInfo.getServerAddress().getPort(), this);
491
492 // Create the master address manager, register with zk, and start it. Then
493 // block until a master is available. No point in starting up if no master
494 // running.
495 this.masterAddressManager = new MasterAddressTracker(this.zooKeeper, this);
496 this.masterAddressManager.start();
497 blockAndCheckIfStopped(this.masterAddressManager);
498
499 // Wait on cluster being up. Master will set this flag up in zookeeper
500 // when ready.
501 this.clusterStatusTracker = new ClusterStatusTracker(this.zooKeeper, this);
502 this.clusterStatusTracker.start();
503 blockAndCheckIfStopped(this.clusterStatusTracker);
504
505 // Create the catalog tracker and start it;
// Effectively no timeout (Integer.MAX_VALUE ms) unless overridden in config.
506 this.catalogTracker = new CatalogTracker(this.zooKeeper, this.conf,
507 this, this.conf.getInt("hbase.regionserver.catalog.timeout", Integer.MAX_VALUE));
508 catalogTracker.start();
509 }
510
511 /**
512 * Utilty method to wait indefinitely on a znode availability while checking
513 * if the region server is shut down
514 * @param tracker znode tracker to use
515 * @throws IOException any IO exception, plus if the RS is stopped
516 * @throws InterruptedException
517 */
518 private void blockAndCheckIfStopped(ZooKeeperNodeTracker tracker)
519 throws IOException, InterruptedException {
520 while (tracker.blockUntilAvailable(this.msgInterval) == null) {
521 if (this.stopped) {
522 throw new IOException("Received the shutdown message while waiting.");
523 }
524 }
525 }
526
527 /**
528 * @return False if cluster shutdown in progress
529 */
530 private boolean isClusterUp() {
531 return this.clusterStatusTracker.isClusterUp();
532 }
533
534 private void initializeThreads() throws IOException {
535
536 // Cache flushing thread.
537 this.cacheFlusher = new MemStoreFlusher(conf, this);
538
539 // Compaction thread
540 this.compactSplitThread = new CompactSplitThread(this);
541
542 // Background thread to check for major compactions; needed if region
543 // has not gotten updates in a while. Make it run at a lesser frequency.
544 int multiplier = this.conf.getInt(HConstants.THREAD_WAKE_FREQUENCY
545 + ".multiplier", 1000);
546 this.majorCompactionChecker = new MajorCompactionChecker(this,
547 this.threadWakeFrequency * multiplier, this);
548
549 this.leases = new Leases((int) conf.getLong(
550 HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
551 HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD),
552 this.threadWakeFrequency);
553 }
554
  /**
   * The HRegionServer sticks in this loop until closed. It repeatedly checks in
   * with the HMaster, sending heartbeats & reports, and receiving HRegion
   * load/unload instructions.
   * <p>
   * On exit from the main loop (stop, abort or unhandled throwable) this
   * method runs the full shutdown sequence: leases, RPC server, info server,
   * block cache, worker threads, regions/WAL, catalog tracker, master proxy,
   * and finally ZooKeeper.
   */
  public void run() {

    try {
      // Initialize threads and wait for a master
      initialize();
    } catch (Throwable e) {
      // abort() sets this.stopped, so the loops below fall straight through.
      abort("Fatal exception during initialization", e);
    }

    this.regionServerThread = Thread.currentThread();
    try {
      // Retry the initial report-for-duty until the master accepts us or we
      // are asked to stop.
      while (!this.stopped) {
        if (tryReportForDuty()) break;
      }
      long lastMsg = 0;
      List<HMsg> outboundMessages = new ArrayList<HMsg>();
      // The main run loop.
      for (int tries = 0; !this.stopped && isHealthy();) {
        if (!isClusterUp()) {
          // Cluster-wide shutdown: exit once we carry no regions; otherwise
          // kick off closing user regions, then keep waiting on them.
          if (isOnlineRegionsEmpty()) {
            stop("Exiting; cluster shutdown set and not carrying any regions");
          } else if (!this.stopping) {
            this.stopping = true;
            closeUserRegions(this.abortRequested);
          } else if (this.stopping && LOG.isDebugEnabled()) {
            LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
          }
        }
        long now = System.currentTimeMillis();
        // Drop into the send loop if msgInterval has elapsed or if something
        // to send. If we fail talking to the master, then we'll sleep below
        // on poll of the outboundMsgs blockingqueue.
        if ((now - lastMsg) >= msgInterval || !outboundMessages.isEmpty()) {
          try {
            doMetrics();
            tryRegionServerReport(outboundMessages);
            lastMsg = System.currentTimeMillis();
            // Reset tries count if we had a successful transaction.
            tries = 0;
            if (this.stopped) continue;
          } catch (Exception e) { // FindBugs REC_CATCH_EXCEPTION
            // Two special exceptions could be printed out here,
            // PleaseHoldException and YouAreDeadException
            if (e instanceof IOException) {
              e = RemoteExceptionHandler.checkIOException((IOException) e);
            }
            if (e instanceof YouAreDeadException) {
              // This will be caught and handled as a fatal error below
              throw e;
            }
            tries++;
            if (tries > 0 && (tries % this.numRetries) == 0) {
              // Check filesystem every so often.
              checkFileSystem();
            }
            if (this.stopped) {
              continue;
            }
            LOG.warn("Attempt=" + tries, e);
            // No point retrying immediately; this is probably connection to
            // master issue. Doing below will cause us to sleep.
            lastMsg = System.currentTimeMillis();
          }
        }
        now = System.currentTimeMillis();
        // Sleep at most until the next report is due, waking early if an
        // outbound message shows up for forwarding to the master.
        HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), TimeUnit.MILLISECONDS);
        if (msg != null) outboundMessages.add(msg);
      } // for
    } catch (Throwable t) {
      if (!checkOOME(t)) {
        abort("Unhandled exception: " + t.getMessage(), t);
      }
    }
    // ---- Shutdown sequence from here down. ----
    this.leases.closeAfterLeasesExpire();
    this.server.stop();
    if (this.splitLogWorker != null) {
      splitLogWorker.stop();
    }
    if (this.infoServer != null) {
      LOG.info("Stopping infoServer");
      try {
        this.infoServer.stop();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    // Send cache a shutdown.
    LruBlockCache c = (LruBlockCache) StoreFile.getBlockCache(this.conf);
    if (c != null) {
      c.shutdown();
    }

    // Send interrupts to wake up threads if sleeping so they notice shutdown.
    // TODO: Should we check they are alive? If OOME could have exited already
    if (this.cacheFlusher != null) this.cacheFlusher.interruptIfNecessary();
    if (this.compactSplitThread != null) this.compactSplitThread.interruptIfNecessary();
    if (this.hlogRoller != null) this.hlogRoller.interruptIfNecessary();
    if (this.majorCompactionChecker != null) this.majorCompactionChecker.interrupt();

    if (this.killed) {
      // Just skip out w/o closing regions.
    } else if (abortRequested) {
      if (this.fsOk) {
        closeAllRegions(abortRequested); // Don't leave any open file handles
        closeWAL(false);
      }
      LOG.info("aborting server at: " + this.serverInfo.getServerName());
    } else {
      // Clean shutdown: close regions, archive the WAL, close scanners.
      closeAllRegions(abortRequested);
      closeWAL(true);
      closeAllScanners();
      LOG.info("stopping server at: " + this.serverInfo.getServerName());
    }
    // Interrupt catalog tracker here in case any regions being opened out in
    // handlers are stuck waiting on meta or root.
    if (this.catalogTracker != null) this.catalogTracker.stop();
    if (this.fsOk)
      waitOnAllRegionsToClose(abortRequested);

    // Make sure the proxy is down.
    if (this.hbaseMaster != null) {
      HBaseRPC.stopProxy(this.hbaseMaster);
      this.hbaseMaster = null;
    }
    this.leases.close();
    this.zooKeeper.close();
    if (!killed) {
      // Wait for worker threads to finish unless we were hard-killed.
      join();
    }
    LOG.info(Thread.currentThread().getName() + " exiting");
  }
691
692 String getOnlineRegionsAsPrintableString() {
693 StringBuilder sb = new StringBuilder();
694 for (HRegion r: this.onlineRegions.values()) {
695 if (sb.length() > 0) sb.append(", ");
696 sb.append(r.getRegionInfo().getEncodedName());
697 }
698 return sb.toString();
699 }
700
  /**
   * Wait on regions close.
   * <p>
   * Loops until the online-regions map drains, re-issuing closes each pass so
   * that regions opened after the close sweep started are caught as well.
   * @param abort whether closes should be issued in abort mode
   */
  private void waitOnAllRegionsToClose(final boolean abort) {
    // Wait till all regions are closed before going out.
    int lastCount = -1;
    while (!isOnlineRegionsEmpty()) {
      int count = getNumberOfOnlineRegions();
      // Only print a message if the count of regions has changed.
      if (count != lastCount) {
        lastCount = count;
        LOG.info("Waiting on " + count + " regions to close");
        // Only print out regions still closing if a small number else will
        // swamp the log.
        if (count < 10 && LOG.isDebugEnabled()) {
          LOG.debug(this.onlineRegions);
        }
      }
      // Ensure all user regions have been sent to close. Use this to
      // protect against the case where an open comes in after we start the
      // iterator of onlineRegions to close all user regions.
      for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
        HRegionInfo hri = e.getValue().getRegionInfo();
        // Skip regions already in transition; their close is in flight.
        if (!this.regionsInTransitionInRS.contains(hri.getEncodedNameAsBytes())) {
          // Don't update zk with this close transition; pass false.
          closeRegion(hri, abort, false);
        }
      }
      Threads.sleep(1000);
    }
  }
732
  /**
   * Send the periodic load report (plus any queued outbound messages) to the
   * master, retrying with a master re-lookup on connection failure, then act
   * on any messages the master sends back.
   * @param outboundMessages messages to deliver; cleared after a send attempt
   * @return the (now empty) outboundMessages list
   * @throws IOException rethrown if the master says we are dead
   *   (YouAreDeadException); handled as fatal by run()
   */
  List<HMsg> tryRegionServerReport(final List<HMsg> outboundMessages)
      throws IOException {
    // Refresh our load numbers and reset the request counter for the next
    // reporting interval.
    this.serverInfo.setLoad(buildServerLoad());
    this.requestCount.set(0);
    addOutboundMsgs(outboundMessages);
    HMsg [] msgs = null;
    while (!this.stopped) {
      try {
        msgs = this.hbaseMaster.regionServerReport(this.serverInfo,
          outboundMessages.toArray(HMsg.EMPTY_HMSG_ARRAY),
          getMostLoadedRegions());
        break;
      } catch (IOException ioe) {
        if (ioe instanceof RemoteException) {
          ioe = ((RemoteException)ioe).unwrapRemoteException();
        }
        if (ioe instanceof YouAreDeadException) {
          // This will be caught and handled as a fatal error in run()
          throw ioe;
        }
        LOG.warn("RemoteException connecting to master", ioe);
        // Couldn't connect to the master, get location from zk and reconnect
        // Method blocks until new master is found or we are stopped
        getMaster();
      }
    }
    // Drop successfully-sent messages from the pending queue and clear the
    // batch for the next round.
    updateOutboundMsgs(outboundMessages);
    outboundMessages.clear();

    // Process master replies; the only one we act on is STOP_REGIONSERVER.
    for (int i = 0; !this.stopped && msgs != null && i < msgs.length; i++) {
      LOG.info(msgs[i].toString());
      // Intercept stop regionserver messages
      if (msgs[i].getType().equals(HMsg.Type.STOP_REGIONSERVER)) {
        stop("Received " + msgs[i]);
        continue;
      }
      LOG.warn("NOT PROCESSING " + msgs[i] + " -- WHY IS MASTER SENDING IT TO US?");
    }
    return outboundMessages;
  }
773
774 private HServerLoad buildServerLoad() {
775 MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
776 HServerLoad hsl = new HServerLoad(requestCount.get(),
777 (int)(memory.getUsed() / 1024 / 1024),
778 (int) (memory.getMax() / 1024 / 1024));
779 for (HRegion r : this.onlineRegions.values()) {
780 hsl.addRegionInfo(createRegionLoad(r));
781 }
782 return hsl;
783 }
784
785 private void closeWAL(final boolean delete) {
786 try {
787 if (this.hlog != null) {
788 if (delete) {
789 hlog.closeAndDelete();
790 } else {
791 hlog.close();
792 }
793 }
794 } catch (Throwable e) {
795 LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
796 }
797 }
798
799 private void closeAllScanners() {
800 // Close any outstanding scanners. Means they'll get an UnknownScanner
801 // exception next time they come in.
802 for (Map.Entry<String, InternalScanner> e : this.scanners.entrySet()) {
803 try {
804 e.getValue().close();
805 } catch (IOException ioe) {
806 LOG.warn("Closing scanner " + e.getKey(), ioe);
807 }
808 }
809 }
810
811 /*
812 * Add to the passed <code>msgs</code> messages to pass to the master.
813 *
814 * @param msgs Current outboundMsgs array; we'll add messages to this List.
815 */
816 private void addOutboundMsgs(final List<HMsg> msgs) {
817 if (msgs.isEmpty()) {
818 this.outboundMsgs.drainTo(msgs);
819 return;
820 }
821 OUTER: for (HMsg m : this.outboundMsgs) {
822 for (HMsg mm : msgs) {
823 // Be careful don't add duplicates.
824 if (mm.equals(m)) {
825 continue OUTER;
826 }
827 }
828 msgs.add(m);
829 }
830 }
831
832 /*
833 * Remove from this.outboundMsgs those messsages we sent the master.
834 *
835 * @param msgs Messages we sent the master.
836 */
837 private void updateOutboundMsgs(final List<HMsg> msgs) {
838 if (msgs.isEmpty()) {
839 return;
840 }
841 for (HMsg m : this.outboundMsgs) {
842 for (HMsg mm : msgs) {
843 if (mm.equals(m)) {
844 this.outboundMsgs.remove(m);
845 break;
846 }
847 }
848 }
849 }
850
  /*
   * Run init. Sets up hlog and starts up all server threads.
   *
   * Applies the configuration the master handed back (possibly overriding our
   * own address), points fs.defaultFS at hbase.rootdir, then brings up the
   * WAL, metrics, and service threads. Any failure marks the server offline,
   * stops it, and is rethrown as an IOException.
   *
   * @param c Extra configuration from the master.
   */
  protected void handleReportForDutyResponse(final MapWritable c) throws IOException {
    try {
      for (Map.Entry<Writable, Writable> e : c.entrySet()) {

        String key = e.getKey().toString();
        // Use the address the master passed us
        if (key.equals("hbase.regionserver.address")) {
          HServerAddress hsa = (HServerAddress) e.getValue();
          LOG.info("Master passed us address to use. Was="
            + this.serverInfo.getServerAddress() + ", Now=" + hsa.toString());
          this.serverInfo.setServerAddress(hsa);
          continue;
        }
        // Every other entry is spliced into our local configuration.
        String value = e.getValue().toString();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Config from master: " + key + "=" + value);
        }
        this.conf.set(key, value);
      }
      // hack! Maps DFSClient => RegionServer for logs. HDFS made this
      // config param for task trackers, but we can piggyback off of it.
      if (this.conf.get("mapred.task.id") == null) {
        this.conf.set("mapred.task.id",
          "hb_rs_" + this.serverInfo.getServerName() + "_" +
          System.currentTimeMillis());
      }

      // Master sent us hbase.rootdir to use. Should be fully qualified
      // path with file system specification included. Set 'fs.defaultFS'
      // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
      // accessors will be going against wrong filesystem (unless all is set
      // to defaults).
      this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
      // Get fs instance used by this RS
      this.fs = FileSystem.get(this.conf);
      this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
      this.hlog = setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      this.metrics = new RegionServerMetrics();
      startServiceThreads();
      LOG.info("Serving as " + this.serverInfo.getServerName() +
        ", RPC listening on " + this.server.getListenerAddress() +
        ", sessionid=0x" +
        Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()));
      // Only now is this server considered fully up.
      isOnline = true;
    } catch (Throwable e) {
      this.isOnline = false;
      stop("Failed initialization");
      throw convertThrowableToIOE(cleanup(e, "Failed init"),
        "Region server startup failed");
    }
  }
908
909 /*
910 * @param r Region to get RegionLoad for.
911 *
912 * @return RegionLoad instance.
913 *
914 * @throws IOException
915 */
916 private HServerLoad.RegionLoad createRegionLoad(final HRegion r) {
917 byte[] name = r.getRegionName();
918 int stores = 0;
919 int storefiles = 0;
920 int storefileSizeMB = 0;
921 int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
922 int storefileIndexSizeMB = 0;
923 synchronized (r.stores) {
924 stores += r.stores.size();
925 for (Store store : r.stores.values()) {
926 storefiles += store.getStorefilesCount();
927 storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
928 storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
929 }
930 }
931 return new HServerLoad.RegionLoad(name, stores, storefiles,
932 storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB);
933 }
934
935 /**
936 * @param encodedRegionName
937 * @return An instance of RegionLoad.
938 * @throws IOException
939 */
940 public HServerLoad.RegionLoad createRegionLoad(final String encodedRegionName) {
941 HRegion r = null;
942 r = this.onlineRegions.get(encodedRegionName);
943 return r != null ? createRegionLoad(r) : null;
944 }
945
946 /*
947 * Cleanup after Throwable caught invoking method. Converts <code>t</code> to
948 * IOE if it isn't already.
949 *
950 * @param t Throwable
951 *
952 * @return Throwable converted to an IOE; methods can only let out IOEs.
953 */
954 private Throwable cleanup(final Throwable t) {
955 return cleanup(t, null);
956 }
957
958 /*
959 * Cleanup after Throwable caught invoking method. Converts <code>t</code> to
960 * IOE if it isn't already.
961 *
962 * @param t Throwable
963 *
964 * @param msg Message to log in error. Can be null.
965 *
966 * @return Throwable converted to an IOE; methods can only let out IOEs.
967 */
968 private Throwable cleanup(final Throwable t, final String msg) {
969 // Don't log as error if NSRE; NSRE is 'normal' operation.
970 if (t instanceof NotServingRegionException) {
971 LOG.debug("NotServingRegionException; " + t.getMessage());
972 return t;
973 }
974 if (msg == null) {
975 LOG.error("", RemoteExceptionHandler.checkThrowable(t));
976 } else {
977 LOG.error(msg, RemoteExceptionHandler.checkThrowable(t));
978 }
979 if (!checkOOME(t)) {
980 checkFileSystem();
981 }
982 return t;
983 }
984
985 /*
986 * @param t
987 *
988 * @return Make <code>t</code> an IOE if it isn't already.
989 */
990 private IOException convertThrowableToIOE(final Throwable t) {
991 return convertThrowableToIOE(t, null);
992 }
993
994 /*
995 * @param t
996 *
997 * @param msg Message to put in new IOE if passed <code>t</code> is not an IOE
998 *
999 * @return Make <code>t</code> an IOE if it isn't already.
1000 */
1001 private IOException convertThrowableToIOE(final Throwable t, final String msg) {
1002 return (t instanceof IOException ? (IOException) t : msg == null
1003 || msg.length() == 0 ? new IOException(t) : new IOException(msg, t));
1004 }
1005
1006 /*
1007 * Check if an OOME and if so, call abort.
1008 *
1009 * @param e
1010 *
1011 * @return True if we OOME'd and are aborting.
1012 */
1013 public boolean checkOOME(final Throwable e) {
1014 boolean stop = false;
1015 if (e instanceof OutOfMemoryError
1016 || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError)
1017 || (e.getMessage() != null && e.getMessage().contains(
1018 "java.lang.OutOfMemoryError"))) {
1019 abort("OutOfMemoryError, aborting", e);
1020 stop = true;
1021 }
1022 return stop;
1023 }
1024
1025 /**
1026 * Checks to see if the file system is still accessible. If not, sets
1027 * abortRequested and stopRequested
1028 *
1029 * @return false if file system is not available
1030 */
1031 protected boolean checkFileSystem() {
1032 if (this.fsOk && this.fs != null) {
1033 try {
1034 FSUtils.checkFileSystemAvailable(this.fs);
1035 } catch (IOException e) {
1036 abort("File System not available", e);
1037 this.fsOk = false;
1038 }
1039 }
1040 return this.fsOk;
1041 }
1042
1043 /*
1044 * Inner class that runs on a long period checking if regions need major
1045 * compaction.
1046 */
1047 private static class MajorCompactionChecker extends Chore {
1048 private final HRegionServer instance;
1049 private int majorCompactPriority;
1050 private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE;
1051
1052
1053 MajorCompactionChecker(final HRegionServer h, final int sleepTime,
1054 final Stoppable stopper) {
1055 super("MajorCompactionChecker", sleepTime, h);
1056 this.instance = h;
1057 LOG.info("Runs every " + sleepTime + "ms");
1058
1059 /*
1060 * MajorCompactPriority is configurable.
1061 * If not set, the compaction will use default priority.
1062 */
1063 majorCompactPriority = this.instance.conf.getInt(
1064 "hbase.regionserver.compactionChecker.majorCompactPriority",
1065 DEFAULT_PRIORITY);
1066 }
1067
1068 @Override
1069 protected void chore() {
1070 for (HRegion r : this.instance.onlineRegions.values()) {
1071 try {
1072 if (r != null && r.isMajorCompaction()) {
1073 // Queue a compaction. Will recognize if major is needed.
1074 if(majorCompactPriority == DEFAULT_PRIORITY ||
1075 majorCompactPriority > r.getCompactPriority()){
1076 this.instance.compactSplitThread.requestCompaction(r, getName()
1077 + " requests major compaction use default priority");
1078 } else {
1079 this.instance.compactSplitThread.requestCompaction(r, getName()
1080 + " requests major compaction use configured priority",
1081 this.majorCompactPriority);
1082 }
1083 }
1084 } catch (IOException e) {
1085 LOG.warn("Failed major compaction check on " + r, e);
1086 }
1087 }
1088 }
1089 }
1090
1091 /**
1092 * Report the status of the server. A server is online once all the startup is
1093 * completed (setting up filesystem, starting service threads, etc.). This
1094 * method is designed mostly to be useful in tests.
1095 *
1096 * @return true if online, false if not.
1097 */
1098 public boolean isOnline() {
1099 return isOnline;
1100 }
1101
1102 /**
1103 * Setup WAL log and replication if enabled.
1104 * Replication setup is done in here because it wants to be hooked up to WAL.
1105 * @return A WAL instance.
1106 * @throws IOException
1107 */
1108 private HLog setupWALAndReplication() throws IOException {
1109 final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
1110 Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo));
1111 if (LOG.isDebugEnabled()) {
1112 LOG.debug("logdir=" + logdir);
1113 }
1114 if (this.fs.exists(logdir)) {
1115 throw new RegionServerRunningException("Region server already "
1116 + "running at " + this.serverInfo.getServerName()
1117 + " because logdir " + logdir.toString() + " exists");
1118 }
1119
1120 // Instantiate replication manager if replication enabled. Pass it the
1121 // log directories.
1122 try {
1123 this.replicationHandler = Replication.isReplication(this.conf)?
1124 new Replication(this, this.fs, logdir, oldLogDir): null;
1125 } catch (KeeperException e) {
1126 throw new IOException("Failed replication handler create", e);
1127 }
1128 return instantiateHLog(logdir, oldLogDir);
1129 }
1130
1131 /**
1132 * Called by {@link #setupWALAndReplication()} creating WAL instance.
1133 * @param logdir
1134 * @param oldLogDir
1135 * @return WAL instance.
1136 * @throws IOException
1137 */
1138 protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException {
1139 return new HLog(this.fs, logdir, oldLogDir, this.conf,
1140 getWALActionListeners(), this.serverInfo.getServerAddress().toString());
1141 }
1142
1143 /**
1144 * Called by {@link #instantiateHLog(Path, Path)} setting up WAL instance.
1145 * Add any {@link WALObserver}s you want inserted before WAL startup.
1146 * @return List of WALActionsListener that will be passed in to
1147 * {@link HLog} on construction.
1148 */
1149 protected List<WALObserver> getWALActionListeners() {
1150 List<WALObserver> listeners = new ArrayList<WALObserver>();
1151 // Log roller.
1152 this.hlogRoller = new LogRoller(this, this);
1153 listeners.add(this.hlogRoller);
1154 if (this.replicationHandler != null) {
1155 // Replication handler is an implementation of WALActionsListener.
1156 listeners.add(this.replicationHandler);
1157 }
1158 return listeners;
1159 }
1160
1161 protected LogRoller getLogRoller() {
1162 return hlogRoller;
1163 }
1164
1165 /*
1166 * @param interval Interval since last time metrics were called.
1167 */
1168 protected void doMetrics() {
1169 try {
1170 metrics();
1171 } catch (Throwable e) {
1172 LOG.warn("Failed metrics", e);
1173 }
1174 }
1175
  /**
   * Recompute this server's metrics: region/request counts, per-store
   * aggregates, queue sizes, and block cache statistics.
   */
  protected void metrics() {
    // Requests are reported per second; avoid division by zero when
    // msgInterval is under a second.
    int seconds = this.msgInterval / 1000;
    if(0 == seconds){
      seconds = 1;
    }
    this.metrics.regions.set(this.onlineRegions.size());
    this.metrics.requests.set(this.requestCount.get()/seconds);
    // Is this too expensive every three seconds getting a lock on onlineRegions
    // and then per store carried? Can I make metrics be sloppier and avoid
    // the synchronizations?
    int stores = 0;
    int storefiles = 0;
    long memstoreSize = 0;
    long storefileIndexSize = 0;
    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      HRegion r = e.getValue();
      memstoreSize += r.memstoreSize.get();
      // Lock each region's stores while tallying its store-level numbers.
      synchronized (r.stores) {
        stores += r.stores.size();
        for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
          Store store = ee.getValue();
          storefiles += store.getStorefilesCount();
          storefileIndexSize += store.getStorefilesIndexSize();
        }
      }
    }
    this.metrics.stores.set(stores);
    this.metrics.storefiles.set(storefiles);
    this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
    this.metrics.storefileIndexSizeMB
        .set((int) (storefileIndexSize / (1024 * 1024)));
    this.metrics.compactionQueueSize.set(compactSplitThread
        .getCompactionQueueSize());
    this.metrics.flushQueueSize.set(cacheFlusher
        .getFlushQueueSize());

    // Block cache stats are only published when a cache is configured.
    LruBlockCache lruBlockCache = (LruBlockCache) StoreFile.getBlockCache(conf);
    if (lruBlockCache != null) {
      this.metrics.blockCacheCount.set(lruBlockCache.size());
      this.metrics.blockCacheFree.set(lruBlockCache.getFreeSize());
      this.metrics.blockCacheSize.set(lruBlockCache.getCurrentSize());
      CacheStats cacheStats = lruBlockCache.getStats();
      this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
      this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
      this.metrics.blockCacheEvictedCount.set(lruBlockCache.getEvictedCount());
      // Hit ratios are reported as integer percentages.
      double ratio = lruBlockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
      ratio = lruBlockCache.getStats().getHitCachingRatio();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatio.set(percent);
    }
  }
1229
1230 /**
1231 * @return Region server metrics instance.
1232 */
1233 public RegionServerMetrics getMetrics() {
1234 return this.metrics;
1235 }
1236
  /*
   * Start maintanence Threads, Server, Worker and lease checker threads.
   * Install an UncaughtExceptionHandler that calls abort of RegionServer if we
   * get an unhandled exception. We cannot set the handler on all threads.
   * Server's internal Listener thread is off limits. For Server, if an OOME, it
   * waits a while then retries. Meantime, a flush or a compaction that tries to
   * run should trigger same critical condition and the shutdown will run. On
   * its way out, this server will shut down Server. Leases are sort of
   * inbetween. It has an internal thread that while it inherits from Chore, it
   * keeps its own internal stop mechanism so needs to be stopped by this
   * hosting server. Worker logs the exception and exits.
   */
  private void startServiceThreads() throws IOException {
    String n = Thread.currentThread().getName();
    // Any uncaught exception in a service thread aborts the whole server.
    UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
      public void uncaughtException(Thread t, Throwable e) {
        abort("Uncaught exception in service thread " + t.getName(), e);
      }
    };

    // Start executor services
    this.service = new ExecutorService(getServerName());
    this.service.startExecutorService(ExecutorType.RS_OPEN_REGION,
      conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
    this.service.startExecutorService(ExecutorType.RS_OPEN_ROOT,
      conf.getInt("hbase.regionserver.executor.openroot.threads", 1));
    this.service.startExecutorService(ExecutorType.RS_OPEN_META,
      conf.getInt("hbase.regionserver.executor.openmeta.threads", 1));
    this.service.startExecutorService(ExecutorType.RS_CLOSE_REGION,
      conf.getInt("hbase.regionserver.executor.closeregion.threads", 3));
    this.service.startExecutorService(ExecutorType.RS_CLOSE_ROOT,
      conf.getInt("hbase.regionserver.executor.closeroot.threads", 1));
    this.service.startExecutorService(ExecutorType.RS_CLOSE_META,
      conf.getInt("hbase.regionserver.executor.closemeta.threads", 1));

    // Worker threads all run as daemons with the abort-on-exception handler.
    Threads.setDaemonThreadRunning(this.hlogRoller, n + ".logRoller", handler);
    Threads.setDaemonThreadRunning(this.cacheFlusher, n + ".cacheFlusher",
      handler);
    Threads.setDaemonThreadRunning(this.compactSplitThread, n + ".compactor",
      handler);
    Threads.setDaemonThreadRunning(this.majorCompactionChecker, n
      + ".majorCompactionChecker", handler);

    // Leases is not a Thread. Internally it runs a daemon thread. If it gets
    // an unhandled exception, it will just exit.
    this.leases.setName(n + ".leaseChecker");
    this.leases.start();
    // Put up info server.
    int port = this.conf.getInt("hbase.regionserver.info.port", 60030);
    // -1 is for disabling info server
    if (port >= 0) {
      String addr = this.conf.get("hbase.regionserver.info.bindAddress",
        "0.0.0.0");
      // check if auto port bind enabled
      boolean auto = this.conf.getBoolean("hbase.regionserver.info.port.auto",
        false);
      while (true) {
        try {
          this.infoServer = new InfoServer(REGIONSERVER, addr, port, false);
          this.infoServer.addServlet("status", "/rs-status", RSStatusServlet.class);
          this.infoServer.addServlet("dump", "/dump", RSDumpServlet.class);
          this.infoServer.setAttribute("regionserver", this);
          this.infoServer.start();
          break;
        } catch (BindException e) {
          if (!auto) {
            // auto bind disabled throw BindException
            throw e;
          }
          // auto bind enabled, try to use another port
          LOG.info("Failed binding http info server to port: " + port);
          port++;
          // update HRS server info port.
          this.serverInfo = new HServerInfo(this.serverInfo.getServerAddress(),
            this.serverInfo.getStartCode(), port,
            this.serverInfo.getHostname());
        }
      }
    }

    if (this.replicationHandler != null) {
      this.replicationHandler.startReplicationServices();
    }

    // Start Server. This service is like leases in that it internally runs
    // a thread.
    this.server.start();

    // Create the log splitting worker and start it
    this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
      this.getConfiguration(), this.getServerName().toString());
    splitLogWorker.start();
  }
1330
1331 /*
1332 * Verify that server is healthy
1333 */
1334 private boolean isHealthy() {
1335 if (!fsOk) {
1336 // File system problem
1337 return false;
1338 }
1339 // Verify that all threads are alive
1340 if (!(leases.isAlive() && compactSplitThread.isAlive()
1341 && cacheFlusher.isAlive() && hlogRoller.isAlive()
1342 && this.majorCompactionChecker.isAlive())) {
1343 stop("One or more threads are no longer alive -- stop");
1344 return false;
1345 }
1346 return true;
1347 }
1348
1349 @Override
1350 public HLog getWAL() {
1351 return this.hlog;
1352 }
1353
1354 @Override
1355 public CatalogTracker getCatalogTracker() {
1356 return this.catalogTracker;
1357 }
1358
  /**
   * Ask this server to stop.
   * @param msg reason for stopping, logged at INFO
   */
  @Override
  public void stop(final String msg) {
    // Set the flag before notifying: run() re-tests this.stopped when woken.
    this.stopped = true;
    LOG.info("STOPPED: " + msg);
    synchronized (this) {
      // Wakes run() if it is sleeping
      notifyAll(); // FindBugs NN_NAKED_NOTIFY
    }
  }
1368
1369 @Override
1370 public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
1371 final boolean daughter)
1372 throws KeeperException, IOException {
1373 // Do checks to see if we need to compact (references or too many files)
1374 if (r.hasReferences() || r.hasTooManyStoreFiles()) {
1375 getCompactionRequester().requestCompaction(r,
1376 r.hasReferences()? "Region has references on open" :
1377 "Region has too many store files");
1378 }
1379
1380 // Add to online regions if all above was successful.
1381 addToOnlineRegions(r);
1382
1383 // Update ZK, ROOT or META
1384 if (r.getRegionInfo().isRootRegion()) {
1385 RootLocationEditor.setRootLocation(getZooKeeper(),
1386 getServerInfo().getServerAddress());
1387 } else if (r.getRegionInfo().isMetaRegion()) {
1388 MetaEditor.updateMetaLocation(ct, r.getRegionInfo(), getServerInfo());
1389 } else {
1390 if (daughter) {
1391 // If daughter of a split, update whole row, not just location.
1392 MetaEditor.addDaughter(ct, r.getRegionInfo(), getServerInfo());
1393 } else {
1394 MetaEditor.updateRegionLocation(ct, r.getRegionInfo(), getServerInfo());
1395 }
1396 }
1397 }
1398
1399 /**
1400 * Return a reference to the metrics instance used for counting RPC calls.
1401 * @return Metrics instance.
1402 */
1403 public HBaseRpcMetrics getRpcMetrics() {
1404 return server.getRpcMetrics();
1405 }
1406
  /**
   * Cause the server to exit without closing the regions it is serving, the log
   * it is using and without notifying the master. Used unit testing and on
   * catastrophic events such as HDFS is yanked out from under hbase or we OOME.
   *
   * @param reason
   *          the reason we are aborting
   * @param cause
   *          the exception that caused the abort, or null
   */
  public void abort(String reason, Throwable cause) {
    String msg = "ABORTING region server " + this + ": " + reason;
    if (cause != null) {
      LOG.fatal(msg, cause);
    } else {
      LOG.fatal(msg);
    }
    this.abortRequested = true;
    // Release the pre-allocated reservation blocks; frees heap so an
    // OOME'd server has room to finish aborting.
    this.reservedSpace.clear();
    if (this.metrics != null) {
      LOG.info("Dump of metrics: " + this.metrics);
    }
    // Do our best to report our abort to the master, but this may not work
    try {
      if (cause != null) {
        msg += "\nCause:\n" + StringUtils.stringifyException(cause);
      }
      if (hbaseMaster != null) {
        hbaseMaster.reportRSFatalError(serverInfo, msg);
      }
    } catch (Throwable t) {
      LOG.warn("Unable to report fatal error to master", t);
    }
    stop(reason);
  }
1442
1443 /**
1444 * @see HRegionServer#abort(String, Throwable)
1445 */
1446 public void abort(String reason) {
1447 abort(reason, null);
1448 }
1449
1450 /*
1451 * Simulate a kill -9 of this server. Exits w/o closing regions or cleaninup
1452 * logs but it does close socket in case want to bring up server on old
1453 * hostname+port immediately.
1454 */
1455 protected void kill() {
1456 this.killed = true;
1457 abort("Simulated kill");
1458 }
1459
1460 /**
1461 * Wait on all threads to finish. Presumption is that all closes and stops
1462 * have already been called.
1463 */
1464 protected void join() {
1465 Threads.shutdown(this.majorCompactionChecker);
1466 Threads.shutdown(this.cacheFlusher);
1467 Threads.shutdown(this.compactSplitThread);
1468 Threads.shutdown(this.hlogRoller);
1469 this.service.shutdown();
1470 if (this.replicationHandler != null) {
1471 this.replicationHandler.join();
1472 }
1473 }
1474
1475 /**
1476 * Get the current master from ZooKeeper and open the RPC connection to it.
1477 *
1478 * Method will block until a master is available. You can break from this
1479 * block by requesting the server stop.
1480 *
1481 * @return master address, or null if server has been stopped
1482 */
1483 private HServerAddress getMaster() {
1484 HServerAddress masterAddress = null;
1485 HMasterRegionInterface master = null;
1486
1487 while (!stopped && master == null) {
1488
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches