Merge lp:~nchohan/appscale/appscale-1.6 into lp:appscale

Proposed by Navraj Chohan
Status: Merged
Merged at revision: 790
Proposed branch: lp:~nchohan/appscale/appscale-1.6
Merge into: lp:appscale
Diff against target: 225866 lines (+171892/-26939)
842 files modified
AppController/djinn.rb (+37/-22)
AppController/lib/djinn_job_data.rb (+5/-2)
AppController/lib/haproxy.rb (+6/-6)
AppController/lib/nginx.rb (+7/-7)
AppController/lib/pbserver.rb (+3/-3)
AppController/lib/rabbitmq.rb (+58/-0)
AppController/terminate.rb (+1/-0)
AppDB/appscale_server.py (+8/-3)
AppDB/appscale_server_mysql.py (+85/-20)
AppDB/cassandra/cassandra_helper.rb (+29/-7)
AppDB/cassandra/prime_cassandra.py (+13/-14)
AppDB/cassandra/py_cassandra.py (+64/-85)
AppDB/cassandra/templates/brisk (+16/-0)
AppDB/cassandra/templates/cassandra.yaml (+146/-73)
AppDB/cassandra/test_cassandra.py (+89/-286)
AppDB/hadoop/hadoop_helper.rb (+1/-1)
AppDB/hadoop/patch/hadoop-hbase.patch (+4/-2)
AppDB/hadoop/templates/hadoop (+468/-0)
AppDB/hbase/hbase-status.sh (+1/-1)
AppDB/hbase/hbase_helper.rb (+1/-1)
AppDB/hbase/patch/HMaster.java (+1171/-0)
AppDB/hbase/patch/HRegionServer.java (+2828/-0)
AppDB/hbase/patch/hbase-defaultip.patch (+0/-42)
AppDB/hbase/templates/hbase-site.xml (+2/-2)
AppDB/hypertable/hypertable_helper.rb (+2/-2)
AppDB/hypertable/py_hypertable.py (+9/-7)
AppDB/hypertable/templates/Capfile (+166/-153)
AppDB/hypertable/templates/hypertable.cfg (+0/-2)
AppDB/zkappscale/zktransaction.py (+3/-2)
AppDB/zkappscale/zookeeper_helper.rb (+1/-0)
AppLoadBalancer/app/views/layouts/main.html.erb (+2/-1)
AppLoadBalancer/app/views/status/cloud.html.erb (+2/-2)
AppLoadBalancer/app/views/users/login.html.erb (+2/-2)
AppLoadBalancer/app/views/users/new.html.erb (+2/-2)
AppLoadBalancer/public/stylesheets/bootstrap.min.css (+356/-0)
AppServer/README (+50/-33)
AppServer/RELEASE_NOTES (+429/-0)
AppServer/VERSION (+2/-2)
AppServer/appcfg.py (+29/-8)
AppServer/bulkload_client.py (+29/-8)
AppServer/bulkloader.py (+29/-8)
AppServer/dev_appserver.py (+29/-8)
AppServer/gen_protorpc.py (+99/-0)
AppServer/google/appengine/_internal/__init__.py (+16/-0)
AppServer/google/appengine/_internal/antlr3/__init__.py (+171/-0)
AppServer/google/appengine/_internal/antlr3/compat.py (+48/-0)
AppServer/google/appengine/_internal/antlr3/constants.py (+57/-0)
AppServer/google/appengine/_internal/antlr3/dfa.py (+213/-0)
AppServer/google/appengine/_internal/antlr3/dottreegen.py (+210/-0)
AppServer/google/appengine/_internal/antlr3/exceptions.py (+364/-0)
AppServer/google/appengine/_internal/antlr3/extras.py (+47/-0)
AppServer/google/appengine/_internal/antlr3/recognizers.py (+1474/-0)
AppServer/google/appengine/_internal/antlr3/streams.py (+1452/-0)
AppServer/google/appengine/_internal/antlr3/tokens.py (+416/-0)
AppServer/google/appengine/_internal/antlr3/tree.py (+2446/-0)
AppServer/google/appengine/_internal/antlr3/treewizard.py (+612/-0)
AppServer/google/appengine/_internal/django/__init__.py (+16/-0)
AppServer/google/appengine/_internal/django/conf/__init__.py (+120/-0)
AppServer/google/appengine/_internal/django/conf/app_template/models.py (+3/-0)
AppServer/google/appengine/_internal/django/conf/app_template/tests.py (+23/-0)
AppServer/google/appengine/_internal/django/conf/app_template/views.py (+1/-0)
AppServer/google/appengine/_internal/django/conf/global_settings.py (+524/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/LC_MESSAGES/django.po (+5002/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/LC_MESSAGES/djangojs.po (+145/-0)
AppServer/google/appengine/_internal/django/conf/locale/en/formats.py (+38/-0)
AppServer/google/appengine/_internal/django/conf/project_template/manage.py (+11/-0)
AppServer/google/appengine/_internal/django/conf/project_template/settings.py (+96/-0)
AppServer/google/appengine/_internal/django/conf/project_template/urls.py (+16/-0)
AppServer/google/appengine/_internal/django/conf/urls/defaults.py (+42/-0)
AppServer/google/appengine/_internal/django/conf/urls/i18n.py (+5/-0)
AppServer/google/appengine/_internal/django/conf/urls/shortcut.py (+5/-0)
AppServer/google/appengine/_internal/django/core/cache/__init__.py (+79/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/base.py (+144/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/db.py (+145/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/dummy.py (+37/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/filebased.py (+171/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/locmem.py (+143/-0)
AppServer/google/appengine/_internal/django/core/cache/backends/memcached.py (+104/-0)
AppServer/google/appengine/_internal/django/core/context_processors.py (+102/-0)
AppServer/google/appengine/_internal/django/core/exceptions.py (+87/-0)
AppServer/google/appengine/_internal/django/core/files/__init__.py (+1/-0)
AppServer/google/appengine/_internal/django/core/files/base.py (+134/-0)
AppServer/google/appengine/_internal/django/core/files/images.py (+62/-0)
AppServer/google/appengine/_internal/django/core/files/locks.py (+70/-0)
AppServer/google/appengine/_internal/django/core/files/move.py (+88/-0)
AppServer/google/appengine/_internal/django/core/files/storage.py (+244/-0)
AppServer/google/appengine/_internal/django/core/files/temp.py (+56/-0)
AppServer/google/appengine/_internal/django/core/files/uploadedfile.py (+128/-0)
AppServer/google/appengine/_internal/django/core/files/uploadhandler.py (+215/-0)
AppServer/google/appengine/_internal/django/core/files/utils.py (+29/-0)
AppServer/google/appengine/_internal/django/core/handlers/base.py (+220/-0)
AppServer/google/appengine/_internal/django/core/handlers/modpython.py (+228/-0)
AppServer/google/appengine/_internal/django/core/handlers/profiler-hotshot.py (+22/-0)
AppServer/google/appengine/_internal/django/core/handlers/wsgi.py (+265/-0)
AppServer/google/appengine/_internal/django/core/mail/__init__.py (+107/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/__init__.py (+1/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/base.py (+39/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/console.py (+37/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/dummy.py (+9/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/filebased.py (+59/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/locmem.py (+24/-0)
AppServer/google/appengine/_internal/django/core/mail/backends/smtp.py (+109/-0)
AppServer/google/appengine/_internal/django/core/mail/message.py (+320/-0)
AppServer/google/appengine/_internal/django/core/mail/utils.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/__init__.py (+437/-0)
AppServer/google/appengine/_internal/django/core/management/base.py (+431/-0)
AppServer/google/appengine/_internal/django/core/management/color.py (+50/-0)
AppServer/google/appengine/_internal/django/core/management/commands/cleanup.py (+11/-0)
AppServer/google/appengine/_internal/django/core/management/commands/compilemessages.py (+61/-0)
AppServer/google/appengine/_internal/django/core/management/commands/createcachetable.py (+52/-0)
AppServer/google/appengine/_internal/django/core/management/commands/dbshell.py (+27/-0)
AppServer/google/appengine/_internal/django/core/management/commands/diffsettings.py (+32/-0)
AppServer/google/appengine/_internal/django/core/management/commands/dumpdata.py (+167/-0)
AppServer/google/appengine/_internal/django/core/management/commands/flush.py (+83/-0)
AppServer/google/appengine/_internal/django/core/management/commands/inspectdb.py (+167/-0)
AppServer/google/appengine/_internal/django/core/management/commands/loaddata.py (+240/-0)
AppServer/google/appengine/_internal/django/core/management/commands/makemessages.py (+321/-0)
AppServer/google/appengine/_internal/django/core/management/commands/reset.py (+57/-0)
AppServer/google/appengine/_internal/django/core/management/commands/runfcgi.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/runserver.py (+84/-0)
AppServer/google/appengine/_internal/django/core/management/commands/shell.py (+69/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sql.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlall.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlclear.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlcustom.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlflush.py (+19/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlindexes.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlinitialdata.py (+7/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlreset.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/sqlsequencereset.py (+20/-0)
AppServer/google/appengine/_internal/django/core/management/commands/startapp.py (+47/-0)
AppServer/google/appengine/_internal/django/core/management/commands/startproject.py (+39/-0)
AppServer/google/appengine/_internal/django/core/management/commands/syncdb.py (+156/-0)
AppServer/google/appengine/_internal/django/core/management/commands/test.py (+40/-0)
AppServer/google/appengine/_internal/django/core/management/commands/testserver.py (+33/-0)
AppServer/google/appengine/_internal/django/core/management/commands/validate.py (+9/-0)
AppServer/google/appengine/_internal/django/core/management/sql.py (+182/-0)
AppServer/google/appengine/_internal/django/core/management/validation.py (+302/-0)
AppServer/google/appengine/_internal/django/core/paginator.py (+120/-0)
AppServer/google/appengine/_internal/django/core/serializers/__init__.py (+117/-0)
AppServer/google/appengine/_internal/django/core/serializers/base.py (+172/-0)
AppServer/google/appengine/_internal/django/core/serializers/json.py (+62/-0)
AppServer/google/appengine/_internal/django/core/serializers/python.py (+142/-0)
AppServer/google/appengine/_internal/django/core/serializers/pyyaml.py (+56/-0)
AppServer/google/appengine/_internal/django/core/serializers/xml_serializer.py (+295/-0)
AppServer/google/appengine/_internal/django/core/servers/basehttp.py (+719/-0)
AppServer/google/appengine/_internal/django/core/servers/fastcgi.py (+183/-0)
AppServer/google/appengine/_internal/django/core/signals.py (+5/-0)
AppServer/google/appengine/_internal/django/core/template_loader.py (+7/-0)
AppServer/google/appengine/_internal/django/core/urlresolvers.py (+396/-0)
AppServer/google/appengine/_internal/django/core/validators.py (+172/-0)
AppServer/google/appengine/_internal/django/core/xheaders.py (+24/-0)
AppServer/google/appengine/_internal/django/template/__init__.py (+1050/-0)
AppServer/google/appengine/_internal/django/template/context.py (+149/-0)
AppServer/google/appengine/_internal/django/template/debug.py (+101/-0)
AppServer/google/appengine/_internal/django/template/defaultfilters.py (+920/-0)
AppServer/google/appengine/_internal/django/template/defaulttags.py (+1162/-0)
AppServer/google/appengine/_internal/django/template/loader.py (+198/-0)
AppServer/google/appengine/_internal/django/template/loader_tags.py (+218/-0)
AppServer/google/appengine/_internal/django/template/loaders/app_directories.py (+74/-0)
AppServer/google/appengine/_internal/django/template/loaders/cached.py (+59/-0)
AppServer/google/appengine/_internal/django/template/loaders/eggs.py (+39/-0)
AppServer/google/appengine/_internal/django/template/loaders/filesystem.py (+61/-0)
AppServer/google/appengine/_internal/django/template/smartif.py (+206/-0)
AppServer/google/appengine/_internal/django/templatetags/cache.py (+63/-0)
AppServer/google/appengine/_internal/django/templatetags/i18n.py (+274/-0)
AppServer/google/appengine/_internal/django/utils/_os.py (+45/-0)
AppServer/google/appengine/_internal/django/utils/_threading_local.py (+240/-0)
AppServer/google/appengine/_internal/django/utils/autoreload.py (+119/-0)
AppServer/google/appengine/_internal/django/utils/cache.py (+228/-0)
AppServer/google/appengine/_internal/django/utils/checksums.py (+22/-0)
AppServer/google/appengine/_internal/django/utils/copycompat.py (+14/-0)
AppServer/google/appengine/_internal/django/utils/daemonize.py (+58/-0)
AppServer/google/appengine/_internal/django/utils/datastructures.py (+473/-0)
AppServer/google/appengine/_internal/django/utils/dateformat.py (+286/-0)
AppServer/google/appengine/_internal/django/utils/dates.py (+33/-0)
AppServer/google/appengine/_internal/django/utils/datetime_safe.py (+89/-0)
AppServer/google/appengine/_internal/django/utils/decorators.py (+90/-0)
AppServer/google/appengine/_internal/django/utils/encoding.py (+180/-0)
AppServer/google/appengine/_internal/django/utils/feedgenerator.py (+372/-0)
AppServer/google/appengine/_internal/django/utils/formats.py (+159/-0)
AppServer/google/appengine/_internal/django/utils/functional.py (+367/-0)
AppServer/google/appengine/_internal/django/utils/hashcompat.py (+20/-0)
AppServer/google/appengine/_internal/django/utils/html.py (+189/-0)
AppServer/google/appengine/_internal/django/utils/http.py (+130/-0)
AppServer/google/appengine/_internal/django/utils/importlib.py (+36/-0)
AppServer/google/appengine/_internal/django/utils/itercompat.py (+45/-0)
AppServer/google/appengine/_internal/django/utils/module_loading.py (+63/-0)
AppServer/google/appengine/_internal/django/utils/numberformat.py (+47/-0)
AppServer/google/appengine/_internal/django/utils/regex_helper.py (+328/-0)
AppServer/google/appengine/_internal/django/utils/safestring.py (+119/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/__init__.py (+349/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/decoder.py (+345/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/encoder.py (+430/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/scanner.py (+65/-0)
AppServer/google/appengine/_internal/django/utils/simplejson/tool.py (+35/-0)
AppServer/google/appengine/_internal/django/utils/stopwords.py (+42/-0)
AppServer/google/appengine/_internal/django/utils/synch.py (+87/-0)
AppServer/google/appengine/_internal/django/utils/termcolors.py (+198/-0)
AppServer/google/appengine/_internal/django/utils/text.py (+282/-0)
AppServer/google/appengine/_internal/django/utils/thread_support.py (+12/-0)
AppServer/google/appengine/_internal/django/utils/timesince.py (+69/-0)
AppServer/google/appengine/_internal/django/utils/translation/__init__.py (+104/-0)
AppServer/google/appengine/_internal/django/utils/translation/trans_null.py (+72/-0)
AppServer/google/appengine/_internal/django/utils/translation/trans_real.py (+550/-0)
AppServer/google/appengine/_internal/django/utils/tree.py (+153/-0)
AppServer/google/appengine/_internal/django/utils/tzinfo.py (+77/-0)
AppServer/google/appengine/_internal/django/utils/version.py (+42/-0)
AppServer/google/appengine/_internal/django/utils/xmlutils.py (+14/-0)
AppServer/google/appengine/_internal/graphy/__init__.py (+1/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/__init__.py (+50/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/encoders.py (+430/-0)
AppServer/google/appengine/_internal/graphy/backends/google_chart_api/util.py (+230/-0)
AppServer/google/appengine/_internal/graphy/bar_chart.py (+171/-0)
AppServer/google/appengine/_internal/graphy/common.py (+412/-0)
AppServer/google/appengine/_internal/graphy/formatters.py (+192/-0)
AppServer/google/appengine/_internal/graphy/line_chart.py (+122/-0)
AppServer/google/appengine/_internal/graphy/pie_chart.py (+178/-0)
AppServer/google/appengine/_internal/graphy/util.py (+13/-0)
AppServer/google/appengine/api/api_base_pb.py (+16/-0)
AppServer/google/appengine/api/apiproxy_stub.py (+5/-5)
AppServer/google/appengine/api/app_identity/__init__.py (+23/-0)
AppServer/google/appengine/api/app_identity/app_identity.py (+467/-0)
AppServer/google/appengine/api/app_identity/app_identity_service_pb.py (+1704/-0)
AppServer/google/appengine/api/app_identity/app_identity_stub.py (+140/-0)
AppServer/google/appengine/api/app_logging.py (+8/-9)
AppServer/google/appengine/api/appinfo.py (+289/-53)
AppServer/google/appengine/api/appinfo_errors.py (+48/-0)
AppServer/google/appengine/api/appinfo_includes.py (+27/-11)
AppServer/google/appengine/api/backendinfo.py (+227/-0)
AppServer/google/appengine/api/backends/__init__.py (+25/-0)
AppServer/google/appengine/api/backends/backends.py (+241/-0)
AppServer/google/appengine/api/blobstore/blobstore.py (+171/-32)
AppServer/google/appengine/api/blobstore/blobstore_service_pb.py (+368/-3)
AppServer/google/appengine/api/blobstore/dict_blob_storage.py (+63/-0)
AppServer/google/appengine/api/capabilities/capability_service_pb.py (+11/-0)
AppServer/google/appengine/api/capabilities/capability_stub.py (+13/-31)
AppServer/google/appengine/api/channel/channel_service_pb.py (+13/-2)
AppServer/google/appengine/api/channel/channel_service_stub.py (+194/-58)
AppServer/google/appengine/api/conf.py (+435/-0)
AppServer/google/appengine/api/conversion/__init__.py (+45/-0)
AppServer/google/appengine/api/conversion/conversion.py (+517/-0)
AppServer/google/appengine/api/conversion/conversion_service_pb.py (+1112/-0)
AppServer/google/appengine/api/conversion/conversion_stub.py (+163/-0)
AppServer/google/appengine/api/conversion/static/test.html (+11/-0)
AppServer/google/appengine/api/conversion/static/test.txt (+1/-0)
AppServer/google/appengine/api/datastore.py (+489/-261)
AppServer/google/appengine/api/datastore_admin.py (+0/-44)
AppServer/google/appengine/api/datastore_distributed.py (+2/-6)
AppServer/google/appengine/api/datastore_errors.py (+6/-3)
AppServer/google/appengine/api/datastore_file_stub.py (+219/-1004)
AppServer/google/appengine/api/datastore_types.py (+157/-17)
AppServer/google/appengine/api/files/__init__.py (+1/-0)
AppServer/google/appengine/api/files/blobstore.py (+69/-8)
AppServer/google/appengine/api/files/crc32c.py (+155/-0)
AppServer/google/appengine/api/files/file.py (+164/-111)
AppServer/google/appengine/api/files/file_service_pb.py (+1703/-247)
AppServer/google/appengine/api/files/file_service_stub.py (+242/-47)
AppServer/google/appengine/api/files/gs.py (+122/-0)
AppServer/google/appengine/api/files/records.py (+371/-0)
AppServer/google/appengine/api/files/testutil.py (+10/-4)
AppServer/google/appengine/api/images/__init__.py (+267/-30)
AppServer/google/appengine/api/images/images_service_pb.py (+362/-7)
AppServer/google/appengine/api/images/images_stub.py (+252/-25)
AppServer/google/appengine/api/lib_config.py (+138/-74)
AppServer/google/appengine/api/logservice/log_service_pb.py (+2895/-262)
AppServer/google/appengine/api/logservice/logservice.py (+804/-92)
AppServer/google/appengine/api/logservice/logservice_stub.py (+494/-0)
AppServer/google/appengine/api/logservice/logsutil.py (+82/-0)
AppServer/google/appengine/api/mail.py (+111/-6)
AppServer/google/appengine/api/mail_errors.py (+3/-0)
AppServer/google/appengine/api/mail_service_pb.py (+209/-3)
AppServer/google/appengine/api/memcache/__init__.py (+471/-138)
AppServer/google/appengine/api/memcache/memcache_service_pb.py (+28/-0)
AppServer/google/appengine/api/memcache/memcache_stub.py (+32/-9)
AppServer/google/appengine/api/prospective_search/error_pb.py (+10/-0)
AppServer/google/appengine/api/prospective_search/prospective_search.py (+2/-2)
AppServer/google/appengine/api/prospective_search/prospective_search_pb.py (+21/-0)
AppServer/google/appengine/api/prospective_search/prospective_search_stub.py (+13/-9)
AppServer/google/appengine/api/queueinfo.py (+6/-2)
AppServer/google/appengine/api/quota.py (+20/-8)
AppServer/google/appengine/api/rdbms.py (+15/-3)
AppServer/google/appengine/api/rdbms_mysqldb.py (+34/-4)
AppServer/google/appengine/api/rdbms_sqlite.py (+0/-65)
AppServer/google/appengine/api/runtime.py (+0/-72)
AppServer/google/appengine/api/runtime/__init__.py (+24/-0)
AppServer/google/appengine/api/runtime/runtime.py (+121/-0)
AppServer/google/appengine/api/search/ExpressionLexer.py (+1740/-0)
AppServer/google/appengine/api/search/ExpressionParser.py (+1622/-0)
AppServer/google/appengine/api/search/QueryLexer.py (+1679/-0)
AppServer/google/appengine/api/search/QueryParser.py (+3146/-0)
AppServer/google/appengine/api/search/__init__.py (+46/-0)
AppServer/google/appengine/api/search/expression_parser.py (+82/-0)
AppServer/google/appengine/api/search/query_parser.py (+113/-0)
AppServer/google/appengine/api/search/search.py (+2063/-0)
AppServer/google/appengine/api/search/search_service_pb.py (+4423/-0)
AppServer/google/appengine/api/search/simple_search_stub.py (+861/-0)
AppServer/google/appengine/api/system/__init__.py (+16/-0)
AppServer/google/appengine/api/system/system_service_pb.py (+769/-0)
AppServer/google/appengine/api/system/system_stub.py (+83/-0)
AppServer/google/appengine/api/taskqueue/__init__.py (+4/-1)
AppServer/google/appengine/api/taskqueue/taskqueue.py (+638/-98)
AppServer/google/appengine/api/taskqueue/taskqueue_rabbitmq.py (+786/-0)
AppServer/google/appengine/api/taskqueue/taskqueue_service_pb.py (+996/-67)
AppServer/google/appengine/api/taskqueue/taskqueue_stub.py (+1215/-296)
AppServer/google/appengine/api/urlfetch.py (+31/-3)
AppServer/google/appengine/api/urlfetch_errors.py (+7/-1)
AppServer/google/appengine/api/urlfetch_service_pb.py (+12/-0)
AppServer/google/appengine/api/user_service_pb.py (+22/-2)
AppServer/google/appengine/api/users.py (+3/-2)
AppServer/google/appengine/api/validation.py (+31/-9)
AppServer/google/appengine/api/xmpp/SOAPpy/Client.py (+0/-495)
AppServer/google/appengine/api/xmpp/SOAPpy/Config.py (+0/-202)
AppServer/google/appengine/api/xmpp/SOAPpy/Errors.py (+0/-79)
AppServer/google/appengine/api/xmpp/SOAPpy/GSIServer.py (+0/-143)
AppServer/google/appengine/api/xmpp/SOAPpy/NS.py (+0/-104)
AppServer/google/appengine/api/xmpp/SOAPpy/Parser.py (+0/-1067)
AppServer/google/appengine/api/xmpp/SOAPpy/SOAP.py (+0/-40)
AppServer/google/appengine/api/xmpp/SOAPpy/SOAPBuilder.py (+0/-636)
AppServer/google/appengine/api/xmpp/SOAPpy/Server.py (+0/-706)
AppServer/google/appengine/api/xmpp/SOAPpy/Types.py (+0/-1736)
AppServer/google/appengine/api/xmpp/SOAPpy/URLopener.py (+0/-23)
AppServer/google/appengine/api/xmpp/SOAPpy/Utilities.py (+0/-178)
AppServer/google/appengine/api/xmpp/SOAPpy/WSDL.py (+0/-119)
AppServer/google/appengine/api/xmpp/SOAPpy/__init__.py (+0/-16)
AppServer/google/appengine/api/xmpp/SOAPpy/version.py (+0/-2)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/Namespaces.py (+0/-124)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/TimeoutSocket.py (+0/-179)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/UserTuple.py (+0/-99)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/Utility.py (+0/-1348)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/WSDLTools.py (+0/-1602)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/XMLSchema.py (+0/-2879)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/XMLname.py (+0/-90)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/__init__.py (+0/-8)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/c14n.py (+0/-535)
AppServer/google/appengine/api/xmpp/SOAPpy/wstools/logging.py (+0/-84)
AppServer/google/appengine/api/xmpp/__init__.py (+24/-4)
AppServer/google/appengine/api/xmpp/xmpp_service_pb.py (+18/-0)
AppServer/google/appengine/api/xmpp/xmpp_service_stub.py (+2/-1)
AppServer/google/appengine/base/capabilities_pb.py (+11/-0)
AppServer/google/appengine/cron/GrocLexer.py (+148/-111)
AppServer/google/appengine/cron/GrocParser.py (+68/-68)
AppServer/google/appengine/datastore/datastore_index.py (+75/-11)
AppServer/google/appengine/datastore/datastore_pb.py (+1008/-709)
AppServer/google/appengine/datastore/datastore_query.py (+1641/-228)
AppServer/google/appengine/datastore/datastore_rpc.py (+501/-258)
AppServer/google/appengine/datastore/datastore_sqlite_stub.py (+224/-922)
AppServer/google/appengine/datastore/document_pb.py (+766/-0)
AppServer/google/appengine/datastore/entity_pb.py (+18/-0)
AppServer/google/appengine/dist/__init__.py (+2/-0)
AppServer/google/appengine/dist/_library.py (+58/-3)
AppServer/google/appengine/dist/httplib.py (+12/-3)
AppServer/google/appengine/dist27/__init__.py (+18/-0)
AppServer/google/appengine/dist27/httplib.py (+791/-0)
AppServer/google/appengine/dist27/urllib.py (+1641/-0)
AppServer/google/appengine/ext/admin/__init__.py (+830/-121)
AppServer/google/appengine/ext/admin/datastore_stats_generator.py (+327/-0)
AppServer/google/appengine/ext/admin/templates/backend.html (+45/-0)
AppServer/google/appengine/ext/admin/templates/backends.html (+78/-0)
AppServer/google/appengine/ext/admin/templates/base.html (+5/-1)
AppServer/google/appengine/ext/admin/templates/css/datastore.css (+6/-0)
AppServer/google/appengine/ext/admin/templates/css/datastore_indexes.css (+57/-0)
AppServer/google/appengine/ext/admin/templates/datastore.html (+5/-2)
AppServer/google/appengine/ext/admin/templates/datastore_edit.html (+2/-1)
AppServer/google/appengine/ext/admin/templates/datastore_indexes.html (+67/-0)
AppServer/google/appengine/ext/admin/templates/datastore_stats.html (+47/-0)
AppServer/google/appengine/ext/admin/templates/inboundmail.html (+1/-0)
AppServer/google/appengine/ext/admin/templates/interactive.html (+1/-0)
AppServer/google/appengine/ext/admin/templates/memcache.html (+3/-0)
AppServer/google/appengine/ext/admin/templates/queues.html (+4/-4)
AppServer/google/appengine/ext/admin/templates/search.html (+53/-0)
AppServer/google/appengine/ext/admin/templates/tasks.html (+22/-5)
AppServer/google/appengine/ext/admin/templates/xmpp.html (+1/-0)
AppServer/google/appengine/ext/appstats/datamodel_pb.py (+14/-0)
AppServer/google/appengine/ext/appstats/recording.py (+186/-95)
AppServer/google/appengine/ext/appstats/static/appstats_css.css (+1/-1)
AppServer/google/appengine/ext/appstats/static/appstats_js.js (+79/-81)
AppServer/google/appengine/ext/appstats/templates/details.html (+2/-4)
AppServer/google/appengine/ext/appstats/templates/main.html (+0/-1)
AppServer/google/appengine/ext/appstats/ui.py (+25/-29)
AppServer/google/appengine/ext/blobstore/blobstore.py (+57/-4)
AppServer/google/appengine/ext/builtins/__init__.py (+43/-27)
AppServer/google/appengine/ext/builtins/admin_redirect/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/builtins/appstats/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/builtins/datastore_admin/include-python27.yaml (+12/-0)
AppServer/google/appengine/ext/builtins/default/include-python27.yaml (+1/-0)
AppServer/google/appengine/ext/builtins/deferred/include-python27.yaml (+4/-0)
AppServer/google/appengine/ext/builtins/mapreduce/include-python27.yaml (+4/-0)
AppServer/google/appengine/ext/builtins/remote_api/include-python27.yaml (+3/-0)
AppServer/google/appengine/ext/datastore_admin/copy_handler.py (+62/-37)
AppServer/google/appengine/ext/datastore_admin/delete_handler.py (+6/-1)
AppServer/google/appengine/ext/datastore_admin/remote_api_put_stub.py (+1/-1)
AppServer/google/appengine/ext/datastore_admin/static/css/compiled.css (+1/-1)
AppServer/google/appengine/ext/datastore_admin/static/js/compiled.js (+18/-20)
AppServer/google/appengine/ext/datastore_admin/utils.py (+4/-3)
AppServer/google/appengine/ext/db/__init__.py (+374/-170)
AppServer/google/appengine/ext/db/djangoforms.py (+27/-8)
AppServer/google/appengine/ext/db/metadata.py (+123/-10)
AppServer/google/appengine/ext/db/polymodel.py (+17/-2)
AppServer/google/appengine/ext/db/stats.py (+121/-0)
AppServer/google/appengine/ext/deferred/deferred.py (+24/-4)
AppServer/google/appengine/ext/django/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/rdbms/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/backends/rdbms/base.py (+27/-0)
AppServer/google/appengine/ext/django/management/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/management/commands/__init__.py (+16/-0)
AppServer/google/appengine/ext/django/management/commands/getoauthtoken.py (+27/-0)
AppServer/google/appengine/ext/ereporter/report_generator.py (+8/-9)
AppServer/google/appengine/ext/gql/__init__.py (+56/-9)
AppServer/google/appengine/ext/key_range/__init__.py (+8/-5)
AppServer/google/appengine/ext/mapreduce/base_handler.py (+36/-3)
AppServer/google/appengine/ext/mapreduce/context.py (+1/-4)
AppServer/google/appengine/ext/mapreduce/control.py (+14/-7)
AppServer/google/appengine/ext/mapreduce/errors.py (+21/-0)
AppServer/google/appengine/ext/mapreduce/handlers.py (+148/-79)
AppServer/google/appengine/ext/mapreduce/input_readers.py (+294/-56)
AppServer/google/appengine/ext/mapreduce/main.py (+14/-2)
AppServer/google/appengine/ext/mapreduce/mapper_pipeline.py (+127/-0)
AppServer/google/appengine/ext/mapreduce/mapreduce_pipeline.py (+202/-0)
AppServer/google/appengine/ext/mapreduce/model.py (+44/-21)
AppServer/google/appengine/ext/mapreduce/output_writers.py (+325/-37)
AppServer/google/appengine/ext/mapreduce/shuffler.py (+555/-0)
AppServer/google/appengine/ext/mapreduce/static/base.css (+0/-14)
AppServer/google/appengine/ext/mapreduce/static/status.js (+44/-16)
AppServer/google/appengine/ext/mapreduce/status.py (+1/-2)
AppServer/google/appengine/ext/mapreduce/test_support.py (+150/-0)
AppServer/google/appengine/ext/mapreduce/util.py (+164/-3)
AppServer/google/appengine/ext/ndb/__init__.py (+3/-0)
AppServer/google/appengine/ext/ndb/context.py (+1086/-0)
AppServer/google/appengine/ext/ndb/eventloop.py (+262/-0)
AppServer/google/appengine/ext/ndb/key.py (+763/-0)
AppServer/google/appengine/ext/ndb/model.py (+2596/-0)
AppServer/google/appengine/ext/ndb/query.py (+1541/-0)
AppServer/google/appengine/ext/ndb/tasklets.py (+1016/-0)
AppServer/google/appengine/ext/ndb/test_utils.py (+87/-0)
AppServer/google/appengine/ext/ndb/utils.py (+101/-0)
AppServer/google/appengine/ext/remote_api/handler.py (+4/-3)
AppServer/google/appengine/ext/remote_api/remote_api_pb.py (+13/-0)
AppServer/google/appengine/ext/remote_api/remote_api_services.py (+80/-11)
AppServer/google/appengine/ext/remote_api/throttle.py (+7/-2)
AppServer/google/appengine/ext/search/__init__.py (+2/-2)
AppServer/google/appengine/ext/testbed/__init__.py (+209/-71)
AppServer/google/appengine/ext/webapp/__init__.py (+101/-727)
AppServer/google/appengine/ext/webapp/_template.py (+54/-0)
AppServer/google/appengine/ext/webapp/_webapp25.py (+792/-0)
AppServer/google/appengine/ext/webapp/blobstore_handlers.py (+65/-16)
AppServer/google/appengine/ext/webapp/template.py (+77/-14)
AppServer/google/appengine/runtime/apiproxy.py (+20/-0)
AppServer/google/appengine/runtime/cgi.py (+301/-0)
AppServer/google/appengine/runtime/request_environment.py (+152/-0)
AppServer/google/appengine/runtime/runtime.py (+184/-0)
AppServer/google/appengine/runtime/wsgi.py (+253/-0)
AppServer/google/appengine/tools/appcfg.py (+947/-310)
AppServer/google/appengine/tools/dev-channel-js.js (+535/-1187)
AppServer/google/appengine/tools/dev_appserver.py (+847/-1856)
AppServer/google/appengine/tools/dev_appserver_blobimage.py (+6/-2)
AppServer/google/appengine/tools/dev_appserver_blobstore.py (+177/-92)
AppServer/google/appengine/tools/dev_appserver_import_hook.py (+1814/-0)
AppServer/google/appengine/tools/dev_appserver_index.py (+35/-21)
AppServer/google/appengine/tools/dev_appserver_login.py (+0/-5)
AppServer/google/appengine/tools/dev_appserver_main.py (+300/-123)
AppServer/google/appengine/tools/dev_appserver_multiprocess.py (+1093/-0)
AppServer/google/appengine/tools/gen_protorpc.py (+300/-0)
AppServer/google/appengine/tools/os_compat.py (+2/-1)
AppServer/google/net/proto/ProtocolBuffer.py (+440/-6)
AppServer/google/net/proto2/proto/descriptor_pb2.py (+48/-33)
AppServer/google/net/proto2/python/internal/containers.py (+5/-0)
AppServer/google/net/proto2/python/internal/enum_type_wrapper.py (+40/-0)
AppServer/google/net/proto2/python/internal/python_message.py (+9/-0)
AppServer/google/net/proto2/python/public/descriptor.py (+81/-0)
AppServer/google/net/proto2/python/public/message.py (+12/-0)
AppServer/google/net/proto2/python/public/reflection.py (+22/-0)
AppServer/google/net/proto2/python/public/text_format.py (+145/-111)
AppServer/google/storage/speckle/proto/client_error_code_pb2.py (+195/-0)
AppServer/google/storage/speckle/proto/client_pb2.py (+426/-57)
AppServer/google/storage/speckle/proto/jdbc_type.py (+2/-2)
AppServer/google/storage/speckle/proto/sql_pb2.py (+146/-26)
AppServer/google/storage/speckle/python/api/constants/CLIENT.py (+53/-0)
AppServer/google/storage/speckle/python/api/constants/FIELD_TYPE.py (+57/-0)
AppServer/google/storage/speckle/python/api/constants/FLAG.py (+48/-0)
AppServer/google/storage/speckle/python/api/constants/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/api/converters.py (+188/-0)
AppServer/google/storage/speckle/python/api/rdbms.py (+383/-88)
AppServer/google/storage/speckle/python/api/rdbms_apiproxy.py (+9/-41)
AppServer/google/storage/speckle/python/api/rdbms_googleapi.py (+244/-0)
AppServer/google/storage/speckle/python/django/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/backend/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/backend/base.py (+240/-0)
AppServer/google/storage/speckle/python/django/backend/client.py (+48/-0)
AppServer/google/storage/speckle/python/django/backend/oauth2storage.py (+58/-0)
AppServer/google/storage/speckle/python/django/management/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/management/commands/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/django/management/commands/getoauthtoken.py (+75/-0)
AppServer/google/storage/speckle/python/tool/__init__.py (+16/-0)
AppServer/google/storage/speckle/python/tool/google_sql.py (+229/-0)
AppServer/google_sql.py (+99/-0)
AppServer/lib/antlr3/OWNERS (+2/-5)
AppServer/lib/cacerts/urlfetch_cacerts.txt (+785/-1408)
AppServer/lib/enum/enum/LICENSE (+340/-0)
AppServer/lib/enum/enum/__init__.py (+239/-0)
AppServer/lib/enum/enum/test/test_enum.py (+547/-0)
AppServer/lib/enum/enum/test/tools.py (+28/-0)
AppServer/lib/google-api-python-client/LICENSE (+202/-0)
AppServer/lib/google-api-python-client/MANIFEST.in (+19/-0)
AppServer/lib/google-api-python-client/PKG-INFO (+17/-0)
AppServer/lib/google-api-python-client/README (+48/-0)
AppServer/lib/google-api-python-client/apiclient/anyjson.py (+32/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/buzz/future.json (+142/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/latitude/future.json (+81/-0)
AppServer/lib/google-api-python-client/apiclient/contrib/moderator/future.json (+107/-0)
AppServer/lib/google-api-python-client/apiclient/discovery.py (+659/-0)
AppServer/lib/google-api-python-client/apiclient/errors.py (+99/-0)
AppServer/lib/google-api-python-client/apiclient/ext/appengine.py (+135/-0)
AppServer/lib/google-api-python-client/apiclient/ext/authtools.py (+159/-0)
AppServer/lib/google-api-python-client/apiclient/ext/django_orm.py (+56/-0)
AppServer/lib/google-api-python-client/apiclient/ext/file.py (+63/-0)
AppServer/lib/google-api-python-client/apiclient/http.py (+350/-0)
AppServer/lib/google-api-python-client/apiclient/mimeparse.py (+172/-0)
AppServer/lib/google-api-python-client/apiclient/model.py (+346/-0)
AppServer/lib/google-api-python-client/apiclient/oauth.py (+483/-0)
AppServer/lib/google-api-python-client/bin/enable-app-engine-project (+138/-0)
AppServer/lib/google-api-python-client/functional_tests/test_services.py (+288/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/PKG-INFO (+17/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/SOURCES.txt (+295/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/requires.txt (+3/-0)
AppServer/lib/google-api-python-client/google_api_python_client.egg-info/top_level.txt (+3/-0)
AppServer/lib/google-api-python-client/oauth2client/appengine.py (+541/-0)
AppServer/lib/google-api-python-client/oauth2client/client.py (+771/-0)
AppServer/lib/google-api-python-client/oauth2client/clientsecrets.py (+113/-0)
AppServer/lib/google-api-python-client/oauth2client/django_orm.py (+114/-0)
AppServer/lib/google-api-python-client/oauth2client/file.py (+92/-0)
AppServer/lib/google-api-python-client/oauth2client/multistore_file.py (+367/-0)
AppServer/lib/google-api-python-client/oauth2client/tools.py (+154/-0)
AppServer/lib/google-api-python-client/runsamples.py (+101/-0)
AppServer/lib/google-api-python-client/runtests.py (+85/-0)
AppServer/lib/google-api-python-client/setpath.sh (+1/-0)
AppServer/lib/google-api-python-client/setup.cfg (+5/-0)
AppServer/lib/google-api-python-client/setup.py (+70/-0)
AppServer/lib/google-api-python-client/uritemplate/__init__.py (+147/-0)
AppServer/lib/grizzled/grizzled/CHANGELOG (+191/-0)
AppServer/lib/grizzled/grizzled/LICENSE (+31/-0)
AppServer/lib/grizzled/grizzled/README (+9/-0)
AppServer/lib/grizzled/grizzled/__init__.py (+23/-0)
AppServer/lib/grizzled/grizzled/cmdline.py (+89/-0)
AppServer/lib/grizzled/grizzled/collections/__init__.py (+21/-0)
AppServer/lib/grizzled/grizzled/collections/dict.py (+559/-0)
AppServer/lib/grizzled/grizzled/config.py (+973/-0)
AppServer/lib/grizzled/grizzled/db/__init__.py (+205/-0)
AppServer/lib/grizzled/grizzled/db/base.py (+980/-0)
AppServer/lib/grizzled/grizzled/db/dbgadfly.py (+185/-0)
AppServer/lib/grizzled/grizzled/db/dummydb.py (+81/-0)
AppServer/lib/grizzled/grizzled/db/mysql.py (+140/-0)
AppServer/lib/grizzled/grizzled/db/oracle.py (+160/-0)
AppServer/lib/grizzled/grizzled/db/postgresql.py (+227/-0)
AppServer/lib/grizzled/grizzled/db/sqlite.py (+141/-0)
AppServer/lib/grizzled/grizzled/db/sqlserver.py (+124/-0)
AppServer/lib/grizzled/grizzled/decorators.py (+181/-0)
AppServer/lib/grizzled/grizzled/exception.py (+52/-0)
AppServer/lib/grizzled/grizzled/file/__init__.py (+323/-0)
AppServer/lib/grizzled/grizzled/file/includer.py (+473/-0)
AppServer/lib/grizzled/grizzled/history.py (+530/-0)
AppServer/lib/grizzled/grizzled/io/__init__.py (+407/-0)
AppServer/lib/grizzled/grizzled/io/filelock.py (+202/-0)
AppServer/lib/grizzled/grizzled/log.py (+122/-0)
AppServer/lib/grizzled/grizzled/misc.py (+146/-0)
AppServer/lib/grizzled/grizzled/net/__init__.py (+103/-0)
AppServer/lib/grizzled/grizzled/net/ftp/__init__.py (+6/-0)
AppServer/lib/grizzled/grizzled/net/ftp/parse.py (+686/-0)
AppServer/lib/grizzled/grizzled/os.py (+386/-0)
AppServer/lib/grizzled/grizzled/proxy.py (+118/-0)
AppServer/lib/grizzled/grizzled/system.py (+244/-0)
AppServer/lib/grizzled/grizzled/test/README (+1/-0)
AppServer/lib/grizzled/grizzled/test/TestProxy.py (+71/-0)
AppServer/lib/grizzled/grizzled/test/collections/TestLRUDict.py (+108/-0)
AppServer/lib/grizzled/grizzled/test/config/TestConfiguration.py (+179/-0)
AppServer/lib/grizzled/grizzled/test/file/Test.py (+62/-0)
AppServer/lib/grizzled/grizzled/test/io/TestPushback.py (+56/-0)
AppServer/lib/grizzled/grizzled/test/misc/TestReadOnly.py (+58/-0)
AppServer/lib/grizzled/grizzled/test/net/ftp/TestFTPListParse.py (+180/-0)
AppServer/lib/grizzled/grizzled/test/system/Test.py (+56/-0)
AppServer/lib/grizzled/grizzled/test/test_helpers.py (+13/-0)
AppServer/lib/grizzled/grizzled/test/text/TestStr2Bool.py (+46/-0)
AppServer/lib/grizzled/grizzled/text/__init__.py (+217/-0)
AppServer/lib/httplib2/httplib2/LICENSE (+21/-0)
AppServer/lib/httplib2/httplib2/OWNERS (+5/-0)
AppServer/lib/httplib2/httplib2/__init__.py (+1529/-0)
AppServer/lib/httplib2/httplib2/cacerts.txt (+714/-0)
AppServer/lib/httplib2/httplib2/httplib2_test.py (+21/-0)
AppServer/lib/httplib2/httplib2/iri2uri.py (+110/-0)
AppServer/lib/httplib2/httplib2/sync_from_mercurial.sh (+49/-0)
AppServer/lib/httplib2/httplib2/test/brokensocket/socket.py (+1/-0)
AppServer/lib/httplib2/httplib2/test/functional/test_proxies.py (+88/-0)
AppServer/lib/httplib2/httplib2/test/miniserver.py (+100/-0)
AppServer/lib/httplib2/httplib2/test/smoke_test.py (+23/-0)
AppServer/lib/httplib2/httplib2/test/test_no_socket.py (+24/-0)
AppServer/lib/oauth2/oauth2/LICENSE (+21/-0)
AppServer/lib/oauth2/oauth2/OWNERS (+2/-0)
AppServer/lib/oauth2/oauth2/__init__.py (+858/-0)
AppServer/lib/prettytable/prettytable/LICENSE (+25/-0)
AppServer/lib/prettytable/prettytable/__init__.py (+625/-0)
AppServer/lib/protorpc/LICENSE (+202/-0)
AppServer/lib/protorpc/protorpc/definition.py (+275/-0)
AppServer/lib/protorpc/protorpc/descriptor.py (+699/-0)
AppServer/lib/protorpc/protorpc/generate.py (+127/-0)
AppServer/lib/protorpc/protorpc/generate_proto.py (+127/-0)
AppServer/lib/protorpc/protorpc/generate_python.py (+204/-0)
AppServer/lib/protorpc/protorpc/message_types.py (+26/-0)
AppServer/lib/protorpc/protorpc/messages.py (+1696/-0)
AppServer/lib/protorpc/protorpc/protobuf.py (+318/-0)
AppServer/lib/protorpc/protorpc/protojson.py (+207/-0)
AppServer/lib/protorpc/protorpc/protourlencode.py (+540/-0)
AppServer/lib/protorpc/protorpc/registry.py (+241/-0)
AppServer/lib/protorpc/protorpc/remote.py (+1211/-0)
AppServer/lib/protorpc/protorpc/static/base.html (+57/-0)
AppServer/lib/protorpc/protorpc/static/forms.html (+31/-0)
AppServer/lib/protorpc/protorpc/static/forms.js (+685/-0)
AppServer/lib/protorpc/protorpc/static/jquery-1.4.2.min.js (+154/-0)
AppServer/lib/protorpc/protorpc/static/jquery.json-2.2.min.js (+31/-0)
AppServer/lib/protorpc/protorpc/static/methods.html (+37/-0)
AppServer/lib/protorpc/protorpc/transport.py (+423/-0)
AppServer/lib/protorpc/protorpc/util.py (+359/-0)
AppServer/lib/protorpc/protorpc/webapp/__init__.py (+18/-0)
AppServer/lib/protorpc/protorpc/webapp/forms.py (+163/-0)
AppServer/lib/protorpc/protorpc/webapp/service_handlers.py (+842/-0)
AppServer/lib/protorpc/protorpc/wsgi/__init__.py (+16/-0)
AppServer/lib/protorpc/protorpc/wsgi/service.py (+204/-0)
AppServer/lib/protorpc/protorpc/wsgi/util.py (+109/-0)
AppServer/lib/python-gflags/AUTHORS (+2/-0)
AppServer/lib/python-gflags/ChangeLog (+41/-0)
AppServer/lib/python-gflags/LICENSE (+28/-0)
AppServer/lib/python-gflags/MANIFEST.in (+19/-0)
AppServer/lib/python-gflags/Makefile (+69/-0)
AppServer/lib/python-gflags/NEWS (+48/-0)
AppServer/lib/python-gflags/OWNERS (+1/-0)
AppServer/lib/python-gflags/PKG-INFO (+10/-0)
AppServer/lib/python-gflags/README (+23/-0)
AppServer/lib/python-gflags/debian/README (+7/-0)
AppServer/lib/python-gflags/debian/changelog (+36/-0)
AppServer/lib/python-gflags/debian/compat (+1/-0)
AppServer/lib/python-gflags/debian/control (+26/-0)
AppServer/lib/python-gflags/debian/copyright (+41/-0)
AppServer/lib/python-gflags/debian/docs (+2/-0)
AppServer/lib/python-gflags/debian/rules (+62/-0)
AppServer/lib/python-gflags/gflags.py (+2769/-0)
AppServer/lib/python-gflags/gflags2man.py (+544/-0)
AppServer/lib/python-gflags/gflags_validators.py (+187/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/PKG-INFO (+10/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/SOURCES.txt (+30/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/python-gflags/python_gflags.egg-info/top_level.txt (+2/-0)
AppServer/lib/python-gflags/setup.cfg (+5/-0)
AppServer/lib/python-gflags/setup.py (+44/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_bar.py (+135/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_baz.py (+45/-0)
AppServer/lib/python-gflags/tests/flags_modules_for_testing/module_foo.py (+141/-0)
AppServer/lib/python-gflags/tests/gflags_googletest.py (+109/-0)
AppServer/lib/python-gflags/tests/gflags_helpxml_test.py (+535/-0)
AppServer/lib/python-gflags/tests/gflags_unittest.py (+1866/-0)
AppServer/lib/python-gflags/tests/gflags_validators_test.py (+220/-0)
AppServer/lib/simplejson/simplejson/__init__.py (+248/-120)
AppServer/lib/simplejson/simplejson/decoder.py (+159/-72)
AppServer/lib/simplejson/simplejson/encoder.py (+111/-42)
AppServer/lib/simplejson/simplejson/scanner.py (+18/-7)
AppServer/lib/sqlcmd/sqlcmd/LICENSE (+30/-0)
AppServer/lib/sqlcmd/sqlcmd/README (+19/-0)
AppServer/lib/sqlcmd/sqlcmd/__init__.py (+1871/-0)
AppServer/lib/sqlcmd/sqlcmd/config.py (+274/-0)
AppServer/lib/sqlcmd/sqlcmd/ecmd.py (+161/-0)
AppServer/lib/sqlcmd/sqlcmd/exception.py (+77/-0)
AppServer/lib/sqlcmd/sqlcmd/sqlcmd_test.py (+18/-0)
AppServer/lib/webapp2/AUTHORS (+28/-0)
AppServer/lib/webapp2/CHANGES (+615/-0)
AppServer/lib/webapp2/LICENSE (+321/-0)
AppServer/lib/webapp2/MANIFEST.in (+13/-0)
AppServer/lib/webapp2/Makefile (+3/-0)
AppServer/lib/webapp2/PKG-INFO (+51/-0)
AppServer/lib/webapp2/README (+7/-0)
AppServer/lib/webapp2/TODO (+66/-0)
AppServer/lib/webapp2/docs/Makefile (+130/-0)
AppServer/lib/webapp2/docs/_static/README (+1/-0)
AppServer/lib/webapp2/docs/_templates/README (+1/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/layout.html (+224/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/pygapp2.py (+57/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/static/gcode.css (+1965/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/static/webapp2.css (+258/-0)
AppServer/lib/webapp2/docs/_themes/webapp2/theme.conf (+11/-0)
AppServer/lib/webapp2/docs/api/extras.config.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.i18n.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.jinja2.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.json.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.local.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.local_app.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.mako.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.protorpc.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.routes.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.securecookie.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.security.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions_memcache.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.sessions_ndb.rst (+3/-0)
AppServer/lib/webapp2/docs/api/extras.users.rst (+3/-0)
AppServer/lib/webapp2/docs/api/index.rst (+10/-0)
AppServer/lib/webapp2/docs/api/webapp2.rst (+152/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/auth/models.rst (+27/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/sessions_memcache.rst (+7/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/sessions_ndb.rst (+13/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/appengine/users.rst (+9/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/auth.rst (+22/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/config.rst (+17/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/i18n.rst (+59/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/jinja2.rst (+30/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/json.rst (+25/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/local.rst (+10/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/local_app.rst (+12/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/mako.rst (+30/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/protorpc.rst (+25/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/routes.rst (+24/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/securecookie.rst (+12/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/security.rst (+17/-0)
AppServer/lib/webapp2/docs/api/webapp2_extras/sessions.rst (+30/-0)
AppServer/lib/webapp2/docs/conf.py (+257/-0)
AppServer/lib/webapp2/docs/features.rst (+289/-0)
AppServer/lib/webapp2/docs/guide/app.rst (+275/-0)
AppServer/lib/webapp2/docs/guide/exceptions.rst (+139/-0)
AppServer/lib/webapp2/docs/guide/extras.rst (+64/-0)
AppServer/lib/webapp2/docs/guide/handlers.rst (+283/-0)
AppServer/lib/webapp2/docs/guide/index.rst (+9/-0)
AppServer/lib/webapp2/docs/guide/request.rst (+211/-0)
AppServer/lib/webapp2/docs/guide/response.rst (+128/-0)
AppServer/lib/webapp2/docs/guide/routing.rst (+392/-0)
AppServer/lib/webapp2/docs/guide/testing.rst (+118/-0)
AppServer/lib/webapp2/docs/index.rst (+205/-0)
AppServer/lib/webapp2/docs/make.bat (+155/-0)
AppServer/lib/webapp2/docs/todo.rst (+178/-0)
AppServer/lib/webapp2/docs/tutorials/auth.rst (+81/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/devenvironment.rst (+50/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/handlingforms.rst (+80/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/helloworld.rst (+128/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/index.rst (+30/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/introduction.rst (+33/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/staticfiles.rst (+79/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/templates.rst (+101/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/uploading.rst (+69/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingdatastore.rst (+341/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingusers.rst (+83/-0)
AppServer/lib/webapp2/docs/tutorials/gettingstarted/usingwebapp2.rst (+97/-0)
AppServer/lib/webapp2/docs/tutorials/i18n.rst (+205/-0)
AppServer/lib/webapp2/docs/tutorials/index.rst (+10/-0)
AppServer/lib/webapp2/docs/tutorials/installing.packages.rst (+62/-0)
AppServer/lib/webapp2/docs/tutorials/marketplace.single.signon.rst (+6/-0)
AppServer/lib/webapp2/docs/tutorials/quickstart.nogae.rst (+101/-0)
AppServer/lib/webapp2/docs/tutorials/quickstart.rst (+81/-0)
AppServer/lib/webapp2/docs/tutorials/virtualenv.rst (+49/-0)
AppServer/lib/webapp2/run_tests.py (+55/-0)
AppServer/lib/webapp2/setup.cfg (+5/-0)
AppServer/lib/webapp2/setup.py (+68/-0)
AppServer/lib/webapp2/tests/extras_appengine_auth_models_test.py (+196/-0)
AppServer/lib/webapp2/tests/extras_appengine_sessions_memcache_test.py (+136/-0)
AppServer/lib/webapp2/tests/extras_appengine_sessions_ndb_test.py (+172/-0)
AppServer/lib/webapp2/tests/extras_appengine_users_test.py (+98/-0)
AppServer/lib/webapp2/tests/extras_auth_test.py (+299/-0)
AppServer/lib/webapp2/tests/extras_config_test.py (+347/-0)
AppServer/lib/webapp2/tests/extras_i18n_test.py (+399/-0)
AppServer/lib/webapp2/tests/extras_jinja2_test.py (+96/-0)
AppServer/lib/webapp2/tests/extras_json_test.py (+37/-0)
AppServer/lib/webapp2/tests/extras_local_app_test.py (+20/-0)
AppServer/lib/webapp2/tests/extras_mako_test.py (+45/-0)
AppServer/lib/webapp2/tests/extras_protorpc_test.py (+196/-0)
AppServer/lib/webapp2/tests/extras_routes_test.py (+289/-0)
AppServer/lib/webapp2/tests/extras_securecookie_test.py (+46/-0)
AppServer/lib/webapp2/tests/extras_security_test.py (+61/-0)
AppServer/lib/webapp2/tests/extras_sessions_test.py (+219/-0)
AppServer/lib/webapp2/tests/handler_test.py (+682/-0)
AppServer/lib/webapp2/tests/misc_test.py (+130/-0)
AppServer/lib/webapp2/tests/request_test.py (+307/-0)
AppServer/lib/webapp2/tests/resources/__init__.py (+1/-0)
AppServer/lib/webapp2/tests/resources/handlers.py (+15/-0)
AppServer/lib/webapp2/tests/resources/i18n.py (+14/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/hello.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template1.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template2.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates/template3.html (+1/-0)
AppServer/lib/webapp2/tests/resources/jinja2_templates_compiled/tmpl_3a79873b1b49be244fd5444b1258ce348be26de8.py (+11/-0)
AppServer/lib/webapp2/tests/resources/mako_templates/template1.html (+1/-0)
AppServer/lib/webapp2/tests/resources/protorpc_services.py (+26/-0)
AppServer/lib/webapp2/tests/resources/template.py (+3/-0)
AppServer/lib/webapp2/tests/response_test.py (+331/-0)
AppServer/lib/webapp2/tests/routing_test.py (+314/-0)
AppServer/lib/webapp2/tests/test_base.py (+86/-0)
AppServer/lib/webapp2/tests/webapp1_test.py (+126/-0)
AppServer/lib/webapp2/webapp2.egg-info/PKG-INFO (+51/-0)
AppServer/lib/webapp2/webapp2.egg-info/SOURCES.txt (+150/-0)
AppServer/lib/webapp2/webapp2.egg-info/dependency_links.txt (+1/-0)
AppServer/lib/webapp2/webapp2.egg-info/not-zip-safe (+1/-0)
AppServer/lib/webapp2/webapp2.egg-info/top_level.txt (+2/-0)
AppServer/lib/webapp2/webapp2.py (+1960/-0)
AppServer/lib/webapp2/webapp2_extras/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/auth/__init__.py (+10/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/auth/models.py (+390/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/sessions_memcache.py (+51/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/sessions_ndb.py (+120/-0)
AppServer/lib/webapp2/webapp2_extras/appengine/users.py (+70/-0)
AppServer/lib/webapp2/webapp2_extras/auth.py (+644/-0)
AppServer/lib/webapp2/webapp2_extras/config.py (+228/-0)
AppServer/lib/webapp2/webapp2_extras/i18n.py (+915/-0)
AppServer/lib/webapp2/webapp2_extras/jinja2.py (+230/-0)
AppServer/lib/webapp2/webapp2_extras/json.py (+108/-0)
AppServer/lib/webapp2/webapp2_extras/local.py (+231/-0)
AppServer/lib/webapp2/webapp2_extras/local_app.py (+24/-0)
AppServer/lib/webapp2/webapp2_extras/mako.py (+136/-0)
AppServer/lib/webapp2/webapp2_extras/protorpc.py (+215/-0)
AppServer/lib/webapp2/webapp2_extras/routes.py (+353/-0)
AppServer/lib/webapp2/webapp2_extras/securecookie.py (+100/-0)
AppServer/lib/webapp2/webapp2_extras/security.py (+218/-0)
AppServer/lib/webapp2/webapp2_extras/sessions.py (+476/-0)
AppServer/lib/webapp2/webapp2_extras/sessions_memcache.py (+20/-0)
AppServer/lib/webapp2/webapp2_extras/sessions_ndb.py (+20/-0)
AppServer/lib/webapp2/webapp2_extras/users.py (+20/-0)
AppServer/new_project_template/app.yaml (+4/-0)
AppServer/remote_api_shell.py (+29/-8)
AppServer/templates/logging_console.js (+0/-257)
AppServer/templates/logging_console_footer.html (+0/-4)
AppServer/templates/logging_console_header.html (+0/-71)
AppServer/templates/logging_console_middle.html (+0/-4)
AppServer_Java/src/com/google/appengine/api/blobstore/BlobstoreServiceImpl.java (+6/-6)
AppServer_Java/src/com/google/appengine/api/blobstore/dev/DatastoreBlobStorage.java (+86/-50)
AppServer_Java/src/com/google/appengine/api/blobstore/dev/LocalBlobstoreService.java (+5/-5)
AppServer_Java/src/com/google/appengine/api/datastore/dev/HTTPClientDatastoreProxy.java (+39/-24)
AppServer_Java/src/com/google/appengine/api/datastore/dev/LocalDatastoreService.java (+47/-49)
AppServer_Java/src/com/google/appengine/api/memcache/dev/LocalMemcacheService.java (+13/-13)
AppServer_Java/src/com/google/appengine/api/users/dev/LocalLoginServlet.java (+6/-6)
AppServer_Java/src/com/google/appengine/api/users/dev/LoginCookieUtils.java (+10/-11)
LICENSE (+6/-0)
RELEASE (+10/-1)
VERSION (+7/-1)
debian/appscale_build.sh (+11/-3)
debian/appscale_install.sh (+6/-0)
debian/appscale_install_functions.sh (+76/-190)
debian/control.core.lucid (+3/-0)
To merge this branch: bzr merge lp:~nchohan/appscale/appscale-1.6
Reviewer Review Type Date Requested Status
Chris Bunch Approve
Navraj Chohan (community) Needs Resubmitting
Review via email: mp+93084@code.launchpad.net

Description of the change

Tested on 1 node for Cassandra, HBase, Hypertable, Mysql, RedisDB, mongodb
Tested on 3 nodes for the same.
Tested with multiple applications with Java/Go/Python.

New items:
RabbitMQ for the task queue system.
HBase 0.90.4
Cassandra 1.0.7
Hypertable 0.9.5.5

GAE 1.6.1 for python and go

Some bug fixes

To post a comment you must log in.
Revision history for this message
Navraj Chohan (nchohan) wrote :

Updated hadoop to 0.20.2-cdh3u3. HBase is also cdh3u3.

Fixed a security issue with task queue request.

Revision history for this message
Chris Bunch (cgb-cs) wrote :

Testing now - works on 2 node Cassandra simple deployment with pipeline-test sample app. Continuing with other apps / deployments.

Revision history for this message
Chris Bunch (cgb-cs) wrote :

A few style comments:

In rabbitmq.rb:

1) On line 11, remove the trailing space.

2) On line 15, shouldn't the semi-colons be double ampersands? Don't you want to only exec the second command if the first succeeds?

3) Lines 20-21 are commented out since you're not using god to manage rabbitmq - thus, remove those and 16-18, since you're defining variables that you're not using.

4) On 11, 25, and 54, you don't need to pass in the secret - functions can get it with HelperFunctions.get_secret()

5) On line 30, extract the rabbitmq port to a global constant

6) Remove line 41, since you don't use it.

7) Remove line 50

8) Remove line 63

review: Needs Fixing
Revision history for this message
Chris Bunch (cgb-cs) wrote :

More style stuff:

djinn_job_data, nginx, and pbserver look good to me

In haproxy.rb, you've got these lines:

+ # Timeout a request if Mongrel does not accept a connection for 60 seconds
+ timeout connect 60000

which seem to indicate that the value is msec - 60000 msec -> 60 sec

but this doesn't seem to hold for the other vals:

+ # Timeout a request if the client did not read any data for 240 seconds
+ timeout client 60000

why isn't the above 60 seconds as well?

In terminate.rb, this whole block:

+#begin
+# require 'rabbitmq'
+# RabbitMQ.stop
+#rescue Execption
+# puts "Problem with rabbitmq, moving on"
+#end
+

is commented out, so just delete it.

In djinn.rb:

1) Remove "Djinn.rb" from all log_debug statements, since it isn't consistently applied (and other files don't identify themselves) and isn't standard for us.

review: Needs Fixing
Revision history for this message
Navraj Chohan (nchohan) wrote :

Fixes committed.

review: Needs Resubmitting
Revision history for this message
Chris Bunch (cgb-cs) :
review: Approve
lp:~nchohan/appscale/appscale-1.6 updated
790. By Navraj Chohan

RabbitMQ for the task queue system. HBase 0.90.4, Cassandra 1.0.7, Hypertable 0.9.5.5, GAE 1.6.1 for python and go, and other bug fixes

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'AppController/djinn.rb'
2--- AppController/djinn.rb 2012-02-01 19:17:26 +0000
3+++ AppController/djinn.rb 2012-02-16 06:02:21 +0000
4@@ -22,6 +22,7 @@
5 require 'nginx'
6 require 'pbserver'
7 require 'blobstore'
8+require 'rabbitmq'
9 require 'app_controller_client'
10 require 'user_app_client'
11 require 'ejabberd'
12@@ -188,7 +189,7 @@
13 return "Error: Credential format wrong"
14 end
15
16- Djinn.log_debug("Parameters were valid")
17+ Djinn.log_debug("Djinn (set_parameters) Parameters were valid")
18
19 keyname = possible_credentials["keyname"]
20 @nodes = Djinn.convert_location_array_to_class(djinn_locations, keyname)
21@@ -198,15 +199,15 @@
22 convert_fqdns_to_ips
23 @creds = sanitize_credentials
24
25- Djinn.log_debug("Djinn locations: #{@nodes.join(', ')}")
26- Djinn.log_debug("DB Credentials: #{HelperFunctions.obscure_creds(@creds).inspect}")
27+ Djinn.log_debug("Djinn (set_parameters) locations: #{@nodes.join(', ')}")
28+ Djinn.log_debug("Djinn (set_parameters) DB Credentials: #{HelperFunctions.obscure_creds(@creds).inspect}")
29 Djinn.log_debug("Apps to load: #{@app_names.join(', ')}")
30
31 find_me_in_locations
32 if @my_index == nil
33 return "Error: Couldn't find me in the node map"
34 end
35- Djinn.log_debug("My index = #{@my_index}")
36+ Djinn.log_debug("Djinn (set_parameters) My index = #{@my_index}")
37
38 ENV['EC2_URL'] = @creds['ec2_url']
39
40@@ -223,7 +224,7 @@
41 end
42
43 @app_names = app_names
44- return "app names is now #{@app_names.join(', ')}"
45+ return "App names is now #{@app_names.join(', ')}"
46 end
47
48 def status(secret)
49@@ -284,8 +285,9 @@
50 def stop_app(app_name, secret)
51 return BAD_SECRET_MSG unless valid_secret?(secret)
52 app_name.gsub!(/[^\w\d\-]/, "")
53- Djinn.log_debug("Shutting down app named [#{app_name}]")
54+ Djinn.log_debug("Djinn (stop_app): Shutting down app named [#{app_name}]")
55 result = ""
56+ Djinn.log_run("rm -rf /var/apps/#{app_name}")
57
58 # app shutdown process can take more than 30 seconds
59 # so run it in a new thread to avoid 'execution expired'
60@@ -299,7 +301,7 @@
61 ip = node.private_ip
62 acc = AppControllerClient.new(ip, @@secret)
63 result = acc.stop_app(app_name)
64- Djinn.log_debug("Removing application #{app_name} --- #{ip} returned #{result} (#{result.class})")
65+ Djinn.log_debug("Djinn (stop_app): Removing application #{app_name} --- #{ip} returned #{result} (#{result.class})")
66 end
67 }
68 end
69@@ -309,7 +311,7 @@
70 ip = HelperFunctions.read_file("#{APPSCALE_HOME}/.appscale/masters")
71 uac = UserAppClient.new(ip, @@secret)
72 result = uac.delete_app(app_name)
73- Djinn.log_debug("Delete app: #{ip} returned #{result} (#{result.class})")
74+ Djinn.log_debug("Djinn (stop_app) Delete app: #{ip} returned #{result} (#{result.class})")
75 end
76
77 # may need to stop XMPP listener
78@@ -364,7 +366,7 @@
79 ip = @nodes[index].private_ip
80 acc = AppControllerClient.new(ip, @@secret)
81 result = acc.set_apps(apps)
82- Djinn.log_debug("#{ip} returned #{result} (#{result.class})")
83+ Djinn.log_debug("djinn.rb:update #{ip} returned #{result} (#{result.class})")
84 @everyone_else_is_done = false if !result
85 }
86
87@@ -706,16 +708,16 @@
88 infrastructure = @creds["infrastructure"]
89
90 if is_hybrid_cloud?
91- Djinn.log_debug("Getting hybrid ips with creds #{@creds.inspect}")
92+ Djinn.log_debug("Djinn (set_uaserver_ips): Getting hybrid ips with creds #{@creds.inspect}")
93 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)
94 else
95- Djinn.log_debug("Getting cloud ips for #{infrastructure} with keyname #{keyname}")
96+ Djinn.log_debug("Djinn (set_uaserver_ips): Getting cloud ips for #{infrastructure} with keyname #{keyname}")
97 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)
98 end
99
100- Djinn.log_debug("Public ips are #{public_ips.join(', ')}")
101- Djinn.log_debug("Private ips are #{private_ips.join(', ')}")
102- Djinn.log_debug("Looking for #{ip_addr}")
103+ Djinn.log_debug("Djinn (set_uaserver_ips): Public ips are #{public_ips.join(', ')}")
104+ Djinn.log_debug("Djinn (set_uaserver_ips): Private ips are #{private_ips.join(', ')}")
105+ Djinn.log_debug("Djinn (set_uaserver_ips): Looking for #{ip_addr}")
106
107 public_ips.each_index { |index|
108 node_public_ip = HelperFunctions.convert_fqdn_to_ip(public_ips[index])
109@@ -747,16 +749,16 @@
110 infrastructure = @creds["infrastructure"]
111
112 if is_hybrid_cloud?
113- Djinn.log_debug("Getting hybrid ips with creds #{@creds.inspect}")
114+ Djinn.log_debug("Djinn (set_uaserver_ips): Getting hybrid ips with creds #{@creds.inspect}")
115 public_ips, private_ips = HelperFunctions.get_hybrid_ips(@creds)
116 else
117- Djinn.log_debug("Getting cloud ips for #{infrastructure} with keyname #{keyname}")
118+ Djinn.log_debug("Djinn (set_uaserver_ips): Getting cloud ips for #{infrastructure} with keyname #{keyname}")
119 public_ips, private_ips = HelperFunctions.get_cloud_ips(infrastructure, keyname)
120 end
121
122- Djinn.log_debug("Public ips are #{public_ips.join(', ')}")
123- Djinn.log_debug("Private ips are #{private_ips.join(', ')}")
124- Djinn.log_debug("Looking for #{private_ip}")
125+ Djinn.log_debug("Djinn (set_uaserver_ips): Public ips are #{public_ips.join(', ')}")
126+ Djinn.log_debug("Djinn (set_uaserver_ips): Private ips are #{private_ips.join(', ')}")
127+ Djinn.log_debug("Djinn (set_uaserver_ips): Looking for #{private_ip}")
128
129 public_ips.each_index { |index|
130 node_private_ip = HelperFunctions.convert_fqdn_to_ip(private_ips[index])
131@@ -874,7 +876,7 @@
132 neptune_job_data=NeptuneJobData)
133
134 if !file.exists?(file_to_load)
135- djinn.log_debug("No neptune data found - no need to restore")
136+ djinn.log_debug("Djinn (load_neptune_info) No neptune data found - no need to restore")
137 return
138 end
139
140@@ -962,11 +964,11 @@
141 response = Net::HTTP.get_response(URI.parse(repo_url))
142 data = JSON.load(response.body)
143 rescue Exception => e
144- Djinn.log_debug("Update API status saw exception #{e.class}")
145+ Djinn.log_debug("EXCEPTION for update_api_status connecting to the repo localhost:8079: #{e.class}. Check to see if the repo is up.")
146 data = {}
147 end
148
149- Djinn.log_debug("Data received is #{data.inspect}")
150+ Djinn.log_debug("Djinn (update_api_status) Data received for API status is #{data.inspect}")
151
152 majorities = {}
153
154@@ -1228,8 +1230,10 @@
155 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, UA_SERVER_PORT)
156 end
157
158+
159 start_blobstore_server if my_node.is_appengine?
160
161+ start_rabbitmq_server
162 # for neptune jobs, start a place where they can save output to
163 # also, since repo does health checks on the app engine apis, start it up there too
164
165@@ -1259,6 +1263,17 @@
166 BlobServer.is_running(db_local_ip)
167 end
168
169+ def start_rabbitmq_server
170+ # The master is run on the shadow, and all slaves are on
171+ # app engine nodes
172+ if my_node.is_shadow?
173+ # The secret provides a unique identifier for rabbitmq
174+ RabbitMQ.start_master()
175+ elsif my_node.is_appengine? or my_node.is_rabbitmq_slave?
176+ # All slaves connect to the master to start
177+ RabbitMQ.start_slave(get_shadow.private_ip)
178+ end
179+ end
180
181 def start_soap_server
182 db_master_ip = nil
183
184=== modified file 'AppController/lib/djinn_job_data.rb'
185--- AppController/lib/djinn_job_data.rb 2012-02-01 19:17:26 +0000
186+++ AppController/lib/djinn_job_data.rb 2012-02-16 06:02:21 +0000
187@@ -39,8 +39,10 @@
188 appscale_jobs += ["login"]
189 appscale_jobs += ["memcache"]
190 appscale_jobs += ["open"]
191+ appscale_jobs += ["rabbitmq_slave"]
192 appscale_jobs += ["babel_master", "babel_slave"]
193 appscale_jobs += ["appengine"] # appengine must go last
194+
195
196 appscale_jobs.each { |job|
197 @jobs << job if roles.include?(job)
198@@ -67,7 +69,7 @@
199 def serialize
200 keyname = @ssh_key.split("/")[-1]
201 serialized = "#{@public_ip}:#{@private_ip}:#{@jobs.join(':')}:#{@instance_id}:#{@cloud}:#{keyname}"
202- Djinn.log_debug("serialized myself to #{serialized}")
203+ Djinn.log_debug("Serialized current node to #{serialized}")
204 return serialized
205 end
206
207@@ -76,7 +78,7 @@
208 split_data = serialized.split(":")
209 roles = split_data[0..-2].join(":")
210 keyname = split_data[-1].split(".")[0]
211- Djinn.log_debug("i'm pretty sure roles is [#{roles}] and keyname is [#{keyname}]")
212+ Djinn.log_debug("Current roles are [#{roles}] and keyname is [#{keyname}]")
213 return DjinnJobData.new(roles, keyname)
214 end
215
216@@ -86,6 +88,7 @@
217 else
218 jobs = @jobs.join(', ')
219 end
220+
221
222 status = "Node in cloud #{@cloud} with instance id #{@instance_id}" +
223 " responds to ssh key #{@ssh_key}, has pub IP #{@public_ip}," +
224
225=== modified file 'AppController/lib/haproxy.rb'
226--- AppController/lib/haproxy.rb 2011-05-16 07:56:58 +0000
227+++ AppController/lib/haproxy.rb 2012-02-16 06:02:21 +0000
228@@ -209,15 +209,15 @@
229 # any Mongrel, not just the one that started the session
230 option redispatch
231
232- # Timeout a request if the client did not read any data for 120 seconds
233- timeout client 30000
234+ # Timeout a request if the client did not read any data for 60 seconds
235+ timeout client 60000
236
237- # Timeout a request if Mongrel does not accept a connection for 30 seconds
238- timeout connect 30000
239+ # Timeout a request if Mongrel does not accept a connection for 60 seconds
240+ timeout connect 60000
241
242 # Timeout a request if Mongrel does not accept the data on the connection,
243- # or does not send a response back in 120 seconds
244- timeout server 30000
245+ # or does not send a response back in 60 seconds
246+ timeout server 60000
247
248 # Enable the statistics page
249 stats enable
250
251=== modified file 'AppController/lib/nginx.rb'
252--- AppController/lib/nginx.rb 2011-06-23 01:38:14 +0000
253+++ AppController/lib/nginx.rb 2012-02-16 06:02:21 +0000
254@@ -89,9 +89,9 @@
255 proxy_redirect off;
256 proxy_pass http://gae_#{app_name};
257 client_max_body_size 2G;
258- proxy_connect_timeout 30;
259- client_body_timeout 30;
260- proxy_read_timeout 30;
261+ proxy_connect_timeout 60;
262+ client_body_timeout 60;
263+ proxy_read_timeout 60;
264 }
265
266 location /404.html {
267@@ -178,9 +178,9 @@
268 proxy_redirect off;
269 proxy_pass http://#{PbServer.name};
270 client_max_body_size 30M;
271- proxy_connect_timeout 30;
272- client_body_timeout 30;
273- proxy_read_timeout 30;
274+ proxy_connect_timeout 60;
275+ client_body_timeout 60;
276+ proxy_read_timeout 60;
277 }
278
279 }
280@@ -322,7 +322,7 @@
281 #tcp_nopush on;
282
283 #keepalive_timeout 0;
284- keepalive_timeout 30;
285+ keepalive_timeout 60;
286 tcp_nodelay on;
287 server_names_hash_bucket_size 128;
288
289
290=== modified file 'AppController/lib/pbserver.rb'
291--- AppController/lib/pbserver.rb 2011-05-19 03:07:09 +0000
292+++ AppController/lib/pbserver.rb 2012-02-16 06:02:21 +0000
293@@ -17,7 +17,7 @@
294 LISTEN_PORT = 8888
295 LISTEN_SSL_PORT = 8443
296 DBS_NEEDING_ONE_PBSERVER = ["mysql"]
297- DBS_WITH_NATIVE_TRANSACTIONS = ["mysql"]
298+ DBS_WITH_NATIVE_PBSERVER = ["mysql"]
299
300 def self.start(master_ip, db_local_ip, my_ip, table, zklocations)
301 pbserver = self.pb_script(table)
302@@ -87,8 +87,8 @@
303 end
304
305 def self.pb_script(table)
306- if DBS_WITH_NATIVE_TRANSACTIONS.include?(table)
307- return "#{APPSCALE_HOME}/AppDB/appscale_server_native_trans.py"
308+ if DBS_WITH_NATIVE_PBSERVER.include?(table)
309+ return "#{APPSCALE_HOME}/AppDB/appscale_server_#{table}.py"
310 else
311 return "#{APPSCALE_HOME}/AppDB/appscale_server.py"
312 end
313
314=== added file 'AppController/lib/rabbitmq.rb'
315--- AppController/lib/rabbitmq.rb 1970-01-01 00:00:00 +0000
316+++ AppController/lib/rabbitmq.rb 2012-02-16 06:02:21 +0000
317@@ -0,0 +1,58 @@
318+#!/usr/bin/ruby -w
319+
320+$:.unshift File.join(File.dirname(__FILE__))
321+require 'djinn_job_data'
322+require 'helperfunctions'
323+RABBITMQ_SERVER_PORT = 5672
324+# A class to wrap all the interactions with the RabbitMQ server
325+class RabbitMQ
326+ RABBIT_PATH = File.join("/", "etc", "rabbitmq-server")
327+
328+ def self.start_master()
329+ Djinn.log_debug("Starting Rabbit Master")
330+ set_cookie()
331+ clean_start
332+ start_cmd = "rabbitmq-server -detached -setcookie #{HelperFunctions.get_secret()} && rabbitmqctl reset;"
333+ stop_cmd = "rabbitmqctl stop"
334+
335+ Djinn.log_debug(`#{start_cmd}`)
336+ end
337+
338+ # Master IP is who to join
339+ def self.start_slave(master_ip)
340+ Djinn.log_debug("Starting Rabbit Slave")
341+ set_cookie()
342+ clean_start
343+ # Wait until the RabbitMQ server port is open on the master node
344+ HelperFunctions.sleep_until_port_is_open("appscale-image0", RABBITMQ_SERVER_PORT)
345+ # start the server, reset it to join the head node
346+
347+ start_cmd = ["rabbitmq-server -detached -setcookie #{HelperFunctions.get_secret()}",
348+ "rabbitmqctl start_app",
349+ "rabbitmqctl stop_app",
350+ "rabbitmqctl reset", # this resets the node
351+ "rabbitmqctl cluster rabbit@appscale-image0",
352+ "rabbitmqctl start_app"]
353+ start_cmd = "#{start_cmd.join('; ')}"
354+ stop_cmd = "rabbitmqctl stop"
355+
356+ Djinn.log_debug(`#{start_cmd}`)
357+ HelperFunctions.sleep_until_port_is_open("localhost", RABBITMQ_SERVER_PORT)
358+ end
359+
360+
361+ def self.stop
362+ Djinn.log_debug("Shutting down rabbitmq")
363+ end
364+
365+ def self.set_cookie()
366+ cookie_file = "/var/lib/rabbitmq/.erlang.cookie"
367+ File.open(cookie_file, 'w') {|f| f.write(HelperFunctions.get_secret()) }
368+ end
369+
370+ def self.clean_start
371+ Djinn.log_debug(`rm -rf /var/log/rabbitmq/*`)
372+ Djinn.log_debug(`rm -rf /var/lib/rabbitmq/mnesia/*`)
373+ end
374+
375+end
376
377=== modified file 'AppController/terminate.rb'
378--- AppController/terminate.rb 2011-05-26 20:35:46 +0000
379+++ AppController/terminate.rb 2012-02-16 06:02:21 +0000
380@@ -135,6 +135,7 @@
381 "beam", "epmd",
382 # Voldemort
383 "VoldemortServer",
384+ "rabbitmq",
385 # these are too general to kill
386 # "java", "python", "python2.6", "python2.5",
387 "thin", "god", "djinn", "xmpp_receiver"
388
389=== modified file 'AppDB/appscale_server.py'
390--- AppDB/appscale_server.py 2011-09-06 00:41:46 +0000
391+++ AppDB/appscale_server.py 2012-02-16 06:02:21 +0000
392@@ -150,8 +150,13 @@
393 self.timeTaken = 0
394 def run(self):
395 s = time.time()
396- self.err, self.ret = self.db.put_entity(self.table, self.key,
397+ ret = self.db.put_entity(self.table, self.key,
398 self.fields, self.values)
399+ if len(ret) > 1:
400+ self.err, self.ret = ret
401+ else:
402+ self.err = ret[0]
403+
404 self.timeTaken = time.time() - s
405
406
407@@ -431,7 +436,7 @@
408 print "errcode:",errcode
409 print "errdetail:",errdetail
410 self.write( apiresponse.Encode() )
411-
412+ del apiresponse
413
414 def _getGlobalStat(self):
415 global_stat_entity=datastore.Entity("__Stat_Total__", id=1)
416@@ -764,6 +769,7 @@
417 clone_qr_pb.clear_cursor()
418 clone_qr_pb.set_more_results( len(results)>0 )
419 #logger.debug("QUERY_RESULT: %s" % clone_qr_pb)
420+ del results
421 return (clone_qr_pb.Encode(), 0, "")
422
423
424@@ -1205,7 +1211,6 @@
425 putresp_pb.key_list().append(e.key())
426
427 if PROFILE: appscale_log.write("TOTAL %d %f\n"%(txn.handle(), time.time() - start))
428-
429 return (putresp_pb.Encode(), 0, "")
430
431
432
433=== renamed file 'AppDB/appscale_server_native_trans.py' => 'AppDB/appscale_server_mysql.py'
434--- AppDB/appscale_server_native_trans.py 2011-09-09 23:51:51 +0000
435+++ AppDB/appscale_server_mysql.py 2012-02-16 06:02:21 +0000
436@@ -58,6 +58,7 @@
437 SECRET_LOCATION = "/etc/appscale/secret.key"
438 VALID_DATASTORES = []
439 ERROR_CODES = []
440+ID_KEY_LENGTH = 64
441 app_datastore = []
442 logOn = False
443 logFilePtr = ""
444@@ -442,6 +443,9 @@
445 indexes = datastore_pb.CompositeIndices(index_proto)
446 for index in indexes.index_list():
447 index_map.setdefault(index.definition().entity_type(), []).append(index)
448+ # TODO(nchohan)
449+ # Looks like we are not storing index info in self.__indexes
450+ # When GetIndices is called it should return the indexes for said app
451 self.__connection_lock.release()
452 def Clear(self):
453 pass
454@@ -937,14 +941,59 @@
455 cursor.execute("SELECT RELEASE_LOCK('%s');" % lock_str)
456 self.__connection.commit()
457
458+ def __getRootKey(app_id, ancestor_list):
459+ key = app_id # mysql cannot have \ as the first char in the row key
460+ a = ancestor_list[0]
461+ key += "/"
462+
463+ # prefix numeric names with "__key__" to prevent collisions with zero-padded ids
464+ if a.has_type():
465+ key += a.type()
466+ else:
467+ return None
468+
469+ if a.has_id():
470+ zero_padded_id = ("0" * (ID_KEY_LENGTH - len(str(a.id())))) + str(a.id())
471+ key += ":" + zero_padded_id
472+ elif a.has_name():
473+ if a.name().isdigit():
474+ key += ":__key__" + a.name()
475+ else:
476+ key += ":" + a.name()
477+ else:
478+ return None
479+
480+ return key
481+
482 @staticmethod
483- def __ExtractEntityGroupFromKeys(keys):
484+ def __ExtractEntityGroupFromKeys(app_id, keys):
485 """Extracts entity group."""
486-
487- types = set([k.path().element_list()[-1].type() for k in keys])
488- assert len(types) == 1
489-
490- return types.pop()
491+ path = keys[0].path()
492+ element_list = path.element_list()
493+ def __getRootKey(app_id, ancestor_list):
494+ key = app_id # mysql cannot have \ as the first char in the row key
495+ a = ancestor_list[0]
496+ key += "/"
497+
498+ # prefix numeric names with "__key__" to prevent collisions with zero-padded ids
499+ if a.has_type():
500+ key += a.type()
501+ else:
502+ return None
503+
504+ if a.has_id():
505+ zero_padded_id = ("0" * (ID_KEY_LENGTH - len(str(a.id())))) + str(a.id())
506+ key += ":" + zero_padded_id
507+ elif a.has_name():
508+ if a.name().isdigit():
509+ key += ":__key__" + a.name()
510+ else:
511+ key += ":" + a.name()
512+ else:
513+ return None
514+
515+ return key
516+ return __getRootKey(app_id, element_list)
517
518 def AssertPbIsInitialized(self, pb):
519 """Raises an exception if the given PB is not initialized and valid."""
520@@ -972,9 +1021,9 @@
521 entities = put_request.entity_list()
522 keys = [e.key() for e in entities]
523 if put_request.has_transaction():
524- entity_group = self.__ExtractEntityGroupFromKeys(keys)
525- txn_id = put_request.transaction().handle()
526- self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
527+ entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
528+ txn_id = put_request.transaction().handle()
529+ self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
530 for entity in entities:
531 self.__ValidateKey(entity.key())
532
533@@ -1004,6 +1053,8 @@
534
535 self.__PutEntities(conn, entities)
536 put_response.key_list().extend([e.key() for e in entities])
537+ except Exception, e:
538+ print str(e)
539 finally:
540 if not put_request.has_transaction():
541 self.__ReleaseConnection(conn)
542@@ -1013,7 +1064,7 @@
543 try:
544 keys = get_request.key_list()
545 if get_request.has_transaction():
546- entity_group = self.__ExtractEntityGroupFromKeys(keys)
547+ entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
548 txn_id = get_request.transaction().handle()
549 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
550 for key in keys:
551@@ -1036,7 +1087,7 @@
552 try:
553 keys = delete_request.key_list()
554 if delete_request.has_transaction():
555- entity_group = self.__ExtractEntityGroupFromKeys(keys)
556+ entity_group = self.__ExtractEntityGroupFromKeys(app_id, keys)
557 txn_id = delete_request.transaction().handle()
558 self.__AcquireLockForEntityGroup(app_id, conn, txn_id, entity_group)
559 self.__DeleteEntities(conn, delete_request.key_list())
560@@ -1525,11 +1576,18 @@
561 'New index id must be 0.')
562
563 self.__index_lock.acquire()
564+
565+ # If it already exists, just return the index id
566+ if self.__FindIndex(index):
567+ self.__index_lock.release()
568+ id_response.set_value(self.__FindIndex(index))
569+ return
570+
571 try:
572- if self.__FindIndex(index):
573- raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
574- 'Index already exists.')
575-
576+ #raise apiproxy_errors.ApplicationError(
577+ # datastore_pb.Error.PERMISSION_DENIED,
578+ # 'Index already exists.')
579+
580 next_id = max([idx.id() for x in self.__indexes.get(app_id, {}).values()
581 for idx in x] + [0]) + 1
582 index.set_id(next_id)
583@@ -1544,6 +1602,8 @@
584 self.__WriteIndexData(conn, app_id)
585 finally:
586 self.__ReleaseConnection(conn)
587+ except Exception, e:
588+ print str(e)
589 finally:
590 self.__index_lock.release()
591
592@@ -1631,7 +1691,6 @@
593 apirequest.clear_request()
594 method = apirequest.method()
595 http_request_data = apirequest.request()
596-
597 if method == "Put":
598 response, errcode, errdetail = self.put_request(app_id,
599 http_request_data)
600@@ -1687,14 +1746,16 @@
601 self.write(apiresponse.Encode() )
602
603 def create_index(self, app_id, http_request_data):
604- index = entity_pb.Index(http_request_data)
605+ index = entity_pb.CompositeIndex(http_request_data)
606 integer = api_base_pb.Integer64Proto()
607 try:
608 app_datastore._Dynamic_CreateIndex(index, integer)
609 except Exception, e:
610+ print str(e)
611 return (api_base_pb.VoidProto().Encode(),
612 datastore_pb.Error.INTERNAL_ERROR,
613 str(e))
614+ return (integer.Encode(), 0, "")
615
616 def get_indices(self, app_id, http_request_data):
617 composite_indices = datastore_pb.CompositeIndices()
618@@ -1707,8 +1768,8 @@
619 return (composite_indices.Encode(), 0, "")
620
621 def update_index(self, app_id, http_request_data):
622- index = entity_pb.Index(http_request_data)
623- void_resp = api_base_pb.VoidProto().Encode()
624+ index = entity_pb.CompositeIndex(http_request_data)
625+ void_resp = api_base_pb.VoidProto()
626 try:
627 app_datastore._Dynamic_UpdateIndex(index, void_resp)
628 except Exception, e:
629@@ -1776,6 +1837,7 @@
630 txn_id = transaction_pb.handle()
631 commitres_pb = datastore_pb.CommitResponse()
632 try:
633+ zoo_keeper.releaseLock(app_id, txn_id)
634 app_datastore._Dynamic_Commit(app_id, transaction_pb, commitres_pb)
635 except:
636 return (commitres_pb.Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to commit for this transaction")
637@@ -1785,9 +1847,12 @@
638 transaction_pb = datastore_pb.Transaction(http_request_data)
639 handle = transaction_pb.handle()
640 try:
641+ zoo_keeper.releaseLock(app_id, handle)
642 app_datastore._Dynamic_Rollback(app_id, transaction_pb, None)
643- except:
644+ except Exception, e:
645+ print str(e)
646 return(api_base_pb.VoidProto().Encode(), datastore_pb.Error.PERMISSION_DENIED, "Unable to rollback for this transaction")
647+ print "Transaction with handle %d was rolled back"%handle
648 return (api_base_pb.VoidProto().Encode(), 0, "")
649
650
651
652=== modified file 'AppDB/cassandra/cassandra_helper.rb'
653--- AppDB/cassandra/cassandra_helper.rb 2011-05-30 01:04:15 +0000
654+++ AppDB/cassandra/cassandra_helper.rb 2012-02-16 06:02:21 +0000
655@@ -15,11 +15,29 @@
656 return false
657 end
658
659+def get_local_token(master_ip, slave_ips)
660+ # Calculate everyone's token for data partitioning
661+ if master_ip == HelperFunctions.local_ip
662+ return 0
663+ end
664+
665+ for ii in 0..slave_ips.length
666+ # Based on local ip return the correct token
667+ # This token generation was taken from:
668+ # http://www.datastax.com/docs/0.8/install/cluster_init#cluster-init
669+ if slave_ips[ii] == HelperFunctions.local_ip
670+ # Add one to offset the master
671+ return (ii + 1)*(2**127)/(1 + slave_ips.length)
672+ end
673+ end
674+end
675+
676 def setup_db_config_files(master_ip, slave_ips, creds)
677 source_dir = "#{APPSCALE_HOME}/AppDB/cassandra/templates"
678 dest_dir = "#{APPSCALE_HOME}/AppDB/cassandra/cassandra/conf"
679
680 all_ips = [master_ip, slave_ips].flatten
681+ local_token = get_local_token(master_ip, slave_ips)
682
683 files_to_config = `ls #{source_dir}`.split
684 files_to_config.each{ |filename|
685@@ -29,6 +47,7 @@
686 contents = source_file.read
687 contents.gsub!(/APPSCALE-LOCAL/, HelperFunctions.local_ip)
688 contents.gsub!(/APPSCALE-MASTER/, master_ip)
689+ contents.gsub!(/APPSCALE-TOKEN/, "#{local_token}")
690 contents.gsub!(/REPLICATION/, creds["replication"])
691 contents.gsub!(/APPSCALE-JMX-PORT/, "7070")
692 File.open(full_path_to_write, "w+") { |dest_file|
693@@ -36,16 +55,16 @@
694 }
695 }
696 }
697+
698 end
699-
700 def start_db_master()
701 @state = "Starting up Cassandra on the head node"
702 Djinn.log_debug("Starting up Cassandra as master")
703
704- Djinn.log_debug(`pkill ThriftBroker`)
705+ Djinn.log_run("pkill ThriftBroker")
706 `rm -rf /var/appscale/cassandra*`
707-
708- Djinn.log_debug(`#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`)
709+ `rm /var/log/appscale/cassandra/system.log`
710+ Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid")
711 HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, 9160)
712 end
713
714@@ -54,10 +73,12 @@
715 Djinn.log_debug("Starting up Cassandra as slave")
716
717 HelperFunctions.sleep_until_port_is_open(Djinn.get_db_master_ip, 9160)
718-
719+ sleep(5)
720 `rm -rf /var/appscale/cassandra*`
721- Djinn.log_debug(`#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`)
722- HelperFunctions.sleep_until_port_is_open(Djinn.get_db_master_ip, 9160)
723+ `rm /var/log/appscale/cassandra/system.log`
724+ `#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid`
725+ #Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/cassandra start -p /var/appscale/appscale-cassandra.pid")
726+ HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, 9160)
727 end
728
729 def stop_db_master
730@@ -70,3 +91,4 @@
731 Djinn.log_run("#{APPSCALE_HOME}/AppDB/cassandra/cassandra/bin/nodetool decommission -h #{HelperFunctions.local_ip} -p 6666")
732 Djinn.log_run("cat /var/appscale/appscale-cassandra.pid | xargs kill -9")
733 end
734+
735
736=== modified file 'AppDB/cassandra/prime_cassandra.py'
737--- AppDB/cassandra/prime_cassandra.py 2011-05-29 21:41:44 +0000
738+++ AppDB/cassandra/prime_cassandra.py 2012-02-16 06:02:21 +0000
739@@ -18,19 +18,19 @@
740 except pycassa.cassandra.ttypes.InvalidRequestException, e:
741 pass
742
743- sys.create_keyspace('Keyspace1', replication)
744- sys.create_column_family('Keyspace1', 'Standard1',
745- comparator_type=UTF8_TYPE)
746- sys.create_column_family('Keyspace1', 'Standard2',
747- comparator_type=UTF8_TYPE)
748- sys.create_column_family('Keyspace1', 'StandardByTime1',
749- comparator_type=TIME_UUID_TYPE)
750- sys.create_column_family('Keyspace1', 'StandardByTime2',
751- comparator_type=TIME_UUID_TYPE)
752- sys.create_column_family('Keyspace1', 'Super1', super=True,
753- comparator_type=UTF8_TYPE)
754- sys.create_column_family('Keyspace1', 'Super2', super=True,
755- comparator_type=UTF8_TYPE)
756+ sys.create_keyspace('Keyspace1', pycassa.SIMPLE_STRATEGY, {'replication_factor':str(replication)})
757+ sys.create_column_family('Keyspace1', 'Standard1', #column_type="Standard",
758+ comparator_type=UTF8_TYPE)
759+ sys.create_column_family('Keyspace1', 'Standard2', #column_type="Standard",
760+ comparator_type=UTF8_TYPE)
761+ sys.create_column_family('Keyspace1', 'StandardByTime1', #column_type="Standard",
762+ comparator_type=TIME_UUID_TYPE)
763+ sys.create_column_family('Keyspace1', 'StandardByTime2', #column_type="Standard",
764+ comparator_type=TIME_UUID_TYPE)
765+ #sys.create_column_family('Keyspace1', 'Super1', column_type="Super",
766+ # comparator_type=UTF8_TYPE)
767+ #sys.create_column_family('Keyspace1', 'Super2', column_type="Super",
768+ # comparator_type=UTF8_TYPE)
769 sys.close()
770 print "SUCCESS"
771
772@@ -41,7 +41,6 @@
773 #print db.get("__keys_")
774 db.create_table(USERS_TABLE, USERS_SCHEMA)
775 db.create_table(APPS_TABLE, APPS_SCHEMA)
776-
777 if len(db.get_schema(USERS_TABLE)) > 1 and len(db.get_schema(APPS_TABLE)) > 1:
778 print "CREATE TABLE SUCCESS FOR USER AND APPS"
779 print db.get_schema(USERS_TABLE)
780
781=== modified file 'AppDB/cassandra/py_cassandra.py'
782--- AppDB/cassandra/py_cassandra.py 2011-05-31 00:01:41 +0000
783+++ AppDB/cassandra/py_cassandra.py 2012-02-16 06:02:21 +0000
784@@ -1,36 +1,31 @@
785 #
786 # Cassandra Interface for AppScale
787-# Rewritten by Navraj Chohan for using range queries
788-# Modified by Chris Bunch for upgrade to Cassandra 0.50.0
789-# on 2/17/10
790+# Rewritten by Navraj Chohan for pycassa
791+# Modified by Chris Bunch for upgrade to Cassandra 0.50.0 on 2/17/10
792 # Original author: suwanny@gmail.com
793
794 import os,sys
795 import time
796-
797-from thrift_cass.Cassandra import Client
798-from thrift_cass.ttypes import *
799-
800 import string
801-import base64 # base64 2009.04.16
802+import base64
803 from dbconstants import *
804 from dbinterface import *
805-#import sqlalchemy.pool as pool
806 import appscale_logger
807 import pycassa
808+from pycassa.system_manager import *
809 from thrift import Thrift
810 from thrift.transport import TSocket
811 from thrift.transport import TTransport
812 from thrift.protocol import TBinaryProtocol
813+from pycassa.cassandra.ttypes import NotFoundException
814+
815 ERROR_DEFAULT = "DB_ERROR:" # ERROR_CASSANDRA
816 # Store all schema information in a special table
817 # If a table does not show up in this table, try a range query
818 # to discover it's schema
819 SCHEMA_TABLE = "__key__"
820 SCHEMA_TABLE_SCHEMA = ['schema']
821-# use 1 Table and 1 ColumnFamily in Cassandra
822 MAIN_TABLE = "Keyspace1"
823-COLUMN_FAMILY = "Standard1"
824
825 PERSISTENT_CONNECTION = False
826 PROFILING = False
827@@ -50,9 +45,20 @@
828 f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
829 self.host = f.read()
830 self.port = DEFAULT_PORT
831- #self.pool = pool.QueuePool(self.__create_connection, reset_on_return=False)
832- #connection.add_pool('AppScale', [self.host, self.port])
833- self.pool = pycassa.ConnectionPool(keyspace='Keyspace1', server_list=[self.host+":"+str(self.port)], prefill=False)
834+ self.pool = pycassa.ConnectionPool(keyspace='Keyspace1',
835+ server_list=[self.host+":"+str(self.port)],
836+ prefill=False)
837+ f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
838+ host = f.read()
839+ sys = SystemManager(host + ":" + str(DEFAULT_PORT))
840+ try:
841+ sys.create_column_family('Keyspace1',
842+ SCHEMA_TABLE,
843+ comparator_type=UTF8_TYPE)
844+ except Exception, e:
845+ print "Exception creating column family: %s"%str(e)
846+ pass
847+
848 self.logger = logger
849
850 def logTiming(self, function, start_time, end_time):
851@@ -62,30 +68,21 @@
852 def get_entity(self, table_name, row_key, column_names):
853 error = [ERROR_DEFAULT]
854 list = error
855- client = None
856 row_key = table_name + '/' + row_key
857 try:
858- slice_predicate = SlicePredicate(column_names=column_names)
859- path = ColumnPath(COLUMN_FAMILY)
860- client = self.__setup_connection()
861- # Result is a column type which has name, value, timestamp
862- result = client.get_slice(row_key, path, slice_predicate,
863- CONSISTENCY_QUORUM)
864+ cf = pycassa.ColumnFamily(self.pool,
865+ string.replace(table_name, '-','a'))
866+ result = cf.get(row_key, columns=column_names)
867+ # Order entities by column_names
868 for column in column_names:
869- for r in result:
870- c = r.column
871- if column == c.name:
872- list.append(c.value)
873- except NotFoundException: # occurs normally if the item isn't in the db
874+ list.append(result[column])
875+ except NotFoundException:
876 list[0] += "Not found"
877- self.__close_connection(client)
878 return list
879 except Exception, ex:
880- #self.logger.debug("Exception %s" % ex)
881 list[0]+=("Exception: %s"%ex)
882- self.__close_connection(client)
883 return list
884- self.__close_connection(client)
885+
886 if len(list) == 1:
887 list[0] += "Not found"
888 return list
889@@ -93,34 +90,25 @@
890 def put_entity(self, table_name, row_key, column_names, cell_values):
891 error = [ERROR_DEFAULT]
892 list = error
893- client = None
894
895 # The first time a table is seen
896 if table_name not in table_cache:
897 self.create_table(table_name, column_names)
898
899 row_key = table_name + '/' + row_key
900- client = self.__setup_connection()
901- curtime = self.timestamp()
902- # Result is a column type which has name, value, timestamp
903- mutations = []
904+ cell_dict = {}
905 for index, ii in enumerate(column_names):
906- column = Column(name = ii, value=cell_values[index],
907- timestamp=curtime)
908- c_or_sc = ColumnOrSuperColumn(column=column)
909- mutation = Mutation(column_or_supercolumn=c_or_sc)
910- mutations.append(mutation)
911- mutation_map = {row_key : { COLUMN_FAMILY : mutations } }
912- client.batch_mutate(mutation_map, CONSISTENCY_QUORUM)
913- """except Exception, ex:
914- print "EXCEPTION"
915- self.logger.debug("Exception %s" % ex)
916- list[0]+=("Exception: %s"%ex)
917- self.__close_connection(client)
918- list.append("0")
919+ cell_dict[ii] = cell_values[index]
920+
921+ try:
922+ # cannot have "-" in the column name
923+ cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
924+ except NotFoundException:
925+ print "Unable to find column family"
926+ list[0]+=("Exception: Column family not found")
927 return list
928- """
929- self.__close_connection(client)
930+
931+ cf.insert(row_key, cell_dict)
932 list.append("0")
933 return list
934
935@@ -130,18 +118,19 @@
936
937 def get_table(self, table_name, column_names):
938 error = [ERROR_DEFAULT]
939- client = None
940 result = error
941 keyslices = []
942 start_key = table_name + "/"
943 end_key = table_name + '/~'
944 try:
945- cf = pycassa.ColumnFamily(self.pool, 'Standard1')
946+ cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
947 keyslices = cf.get_range(columns=column_names,
948 start=start_key,
949- finish=end_key,
950- read_consistency_level=CONSISTENCY_QUORUM)
951+ finish=end_key)
952 keyslices = list(keyslices)
953+ except pycassa.NotFoundException, ex:
954+ self.logger.debug("No column fam yet--exception %s" % ex)
955+ return result
956 except Exception, ex:
957 self.logger.debug("Exception %s" % ex)
958 result[0] += "Exception: " + str(ex)
959@@ -166,21 +155,15 @@
960 def delete_row(self, table_name, row_key):
961 error = [ERROR_DEFAULT]
962 ret = error
963- client = None
964 row_key = table_name + '/' + row_key
965- path = ColumnPath(COLUMN_FAMILY)
966 try:
967- client = self.__setup_connection()
968- curtime = self.timestamp()
969+ cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
970 # Result is a column type which has name, value, timestamp
971- client.remove(row_key, path, curtime,
972- CONSISTENCY_QUORUM)
973+ cf.remove(row_key)
974 except Exception, ex:
975 self.logger.debug("Exception %s" % ex)
976 ret[0]+=("Exception: %s"%ex)
977- self.__close_connection(client)
978 return ret
979- self.__close_connection(client)
980 ret.append("0")
981 return ret
982
983@@ -203,51 +186,47 @@
984 def delete_table(self, table_name):
985 error = [ERROR_DEFAULT]
986 result = error
987- client = None
988 keyslices = []
989- column_parent = ColumnParent(column_family="Standard1")
990- predicate = SlicePredicate(column_names=[])
991 curtime = self.timestamp()
992- path = ColumnPath(COLUMN_FAMILY)
993 start_key = table_name + "/"
994 end_key = table_name + '/~'
995 try:
996- cf = pycassa.ColumnFamily(self.pool, 'Standard1')
997- keyslices = cf.get_range(columns=[],
998- start=start_key,
999- finish=end_key,
1000- read_consistency_level=CONSISTENCY_QUORUM)
1001+ cf = pycassa.ColumnFamily(self.pool, string.replace(table_name, '-','a'))
1002+ cf.truncate()
1003+ self.delete_row(SCHEMA_TABLE, row_key)
1004 except Exception, ex:
1005 self.logger.debug("Exception %s" % ex)
1006 result[0]+=("Exception: %s"%ex)
1007 return result
1008- keys_removed = False
1009- for keyslice in keyslices:
1010- row_key = keyslice[0]
1011- client = self.__setup_connection()
1012- client.remove(row_key,
1013- path,
1014- curtime,
1015- CONSISTENCY_QUORUM)
1016- keys_removed = True
1017- if table_name not in table_cache and keys_removed:
1018+ if table_name not in table_cache:
1019 result[0] += "Table does not exist"
1020 return result
1021 if table_name in table_cache:
1022 del table_cache[table_name]
1023- if client:
1024- self.__close_connection(client)
1025 return result
1026
1027 # Only stores the schema
1028 def create_table(self, table_name, column_names):
1029- table_cache[table_name] = 1
1030 columns = ':'.join(column_names)
1031 row_key = table_name
1032+ print "CREATE TABLE NAME: " + table_name
1033 # Get and make sure we are not overwriting previous schemas
1034 ret = self.get_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA)
1035+ print ret
1036 if ret[0] != ERROR_DEFAULT:
1037- self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])
1038+ f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
1039+ host = f.read()
1040+ sysman = SystemManager(host + ":" + str(DEFAULT_PORT))
1041+ print "Creating column family %s"%table_name
1042+ try:
1043+ sysman.create_column_family('Keyspace1', string.replace(table_name, '-','a'), comparator_type=UTF8_TYPE)
1044+ print "Done creating column family"
1045+ self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])
1046+ except Exception, e:
1047+ print "Unable to create column family %s"%str(e)
1048+ return
1049+
1050+ table_cache[table_name] = 1
1051
1052 ######################################################################
1053 # private methods
1054
1055=== added file 'AppDB/cassandra/templates/brisk'
1056--- AppDB/cassandra/templates/brisk 1970-01-01 00:00:00 +0000
1057+++ AppDB/cassandra/templates/brisk 2012-02-16 06:02:21 +0000
1058@@ -0,0 +1,16 @@
1059+# NOTICE: See also /etc/brisk/cassandra/cassandra-env.sh
1060+
1061+# EXTRA_CLASSPATH provides the means to extend Cassandra's classpath with
1062+# additional libraries. It is formatted as a colon-delimited list of
1063+# class directories and/or jar files. For example, to enable the
1064+# JMX-to-web bridge install libmx4j-java and uncomment the following.
1065+#EXTRA_CLASSPATH="/usr/share/java/mx4j-tools.jar"
1066+
1067+# enable this to also start Hadoop's JobTracker and/or TaskTrackers on this
1068+# machine. If left disabled, this will act as a regular Cassandra node.
1069+HADOOP_ENABLED=1
1070+
1071+# enable this to set the replication factor for CFS. Note that this will only
1072+# have an effect the first time a cluster is started with HADOOP_ENABLED=1 and
1073+# after that will be a no-op. Defaults to 1.
1074+#CFS_REPLICATION_FACTOR=1
1075
1076=== modified file 'AppDB/cassandra/templates/cassandra.yaml'
1077--- AppDB/cassandra/templates/cassandra.yaml 2011-05-30 01:04:15 +0000
1078+++ AppDB/cassandra/templates/cassandra.yaml 2012-02-16 06:02:21 +0000
1079@@ -19,23 +19,15 @@
1080 # the heaviest-loaded existing node. If there is no load information
1081 # available, such as is the case with a new cluster, it will pick
1082 # a random token, which will lead to hot spots.
1083-initial_token:
1084-
1085-# Set to true to make new [non-seed] nodes automatically migrate data
1086-# to themselves from the pre-existing nodes in the cluster. Defaults
1087-# to false because you can only bootstrap N machines at a time from
1088-# an existing cluster of N, so if you are bringing up a cluster of
1089-# 10 machines with 3 seeds you would have to do it in stages. Leaving
1090-# this off for the initial start simplifies that.
1091-auto_bootstrap: false
1092+initial_token: APPSCALE-TOKEN
1093
1094 # See http://wiki.apache.org/cassandra/HintedHandoff
1095 hinted_handoff_enabled: true
1096 # this defines the maximum amount of time a dead host will have hints
1097 # generated. After it has been dead this long, hints will be dropped.
1098 max_hint_window_in_ms: 3600000 # one hour
1099-# Sleep this long after delivering each row or row fragment
1100-hinted_handoff_throttle_delay_in_ms: 50
1101+# Sleep this long after delivering each hint
1102+hinted_handoff_throttle_delay_in_ms: 1
1103
1104 # authentication backend, implementing IAuthenticator; used to identify users
1105 authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
1106@@ -64,10 +56,10 @@
1107 # ordering. Use this as an example if you need custom collation.
1108 #
1109 # See http://wiki.apache.org/cassandra/Operations for more on
1110-# partitioners anv toke liblib libselection.
1111+# partitioners and token selection.
1112 partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
1113
1114-# directories where Cassandra should libstore data on disk.
1115+# directories where Cassandra should store data on disk.
1116 data_file_directories:
1117 - /var/appscale/cassandra/data
1118
1119@@ -77,21 +69,34 @@
1120 # saved caches
1121 saved_caches_directory: /var/appscale/cassandra/saved_caches
1122
1123-# Size to allow commitlog to grow to before creating a new segment
1124-commitlog_rotation_threshold_in_mb: 128
1125-
1126 # commitlog_sync may be either "periodic" or "batch."
1127 # When in batch mode, Cassandra won't ack writes until the commit log
1128 # has been fsynced to disk. It will wait up to
1129-# CommitLogSyncBatchWindowInMS milliseconds for other writes, before
1130+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
1131 # performing the sync.
1132-commitlog_sync: periodic
1133-
1134+#
1135+# commitlog_sync: batch
1136+# commitlog_sync_batch_window_in_ms: 50
1137+#
1138 # the other option is "periodic" where writes may be acked immediately
1139 # and the CommitLog is simply synced every commitlog_sync_period_in_ms
1140 # milliseconds.
1141+commitlog_sync: periodic
1142 commitlog_sync_period_in_ms: 10000
1143
1144+# any class that implements the SeedProvider interface and has a
1145+# constructor that takes a Map<String, String> of parameters will do.
1146+seed_provider:
1147+ # Addresses of hosts that are deemed contact points.
1148+ # Cassandra nodes use this list of hosts to find each other and learn
1149+ # the topology of the ring. You must change this if you are running
1150+ # multiple nodes!
1151+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
1152+ parameters:
1153+ # seeds is actually a comma-delimited list of addresses.
1154+ # Ex: "<ip1>,<ip2>,<ip3>"
1155+ - seeds: "APPSCALE-MASTER"
1156+
1157 # emergency pressure valve: each time heap usage after a full (CMS)
1158 # garbage collection is above this fraction of the max, Cassandra will
1159 # flush the largest memtables.
1160@@ -117,13 +122,6 @@
1161 reduce_cache_sizes_at: 0.85
1162 reduce_cache_capacity_to: 0.6
1163
1164-# Addresses of hosts that are deemed contact points.
1165-# Cassandra nodes use this list of hosts to find each other and learn
1166-# the topology of the ring. You must change this if you are running
1167-# multiple nodes!
1168-seeds:
1169- - APPSCALE-MASTER
1170-
1171 # For workloads with more data than can fit in memory, Cassandra's
1172 # bottleneck will be reads that need to fetch data from
1173 # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
1174@@ -136,6 +134,17 @@
1175 concurrent_reads: 32
1176 concurrent_writes: 32
1177
1178+# Total memory to use for memtables. Cassandra will flush the largest
1179+# memtable when this much memory is used.
1180+# If omitted, Cassandra will set it to 1/3 of the heap.
1181+# memtable_total_space_in_mb: 2048
1182+
1183+# Total space to use for commitlogs.
1184+# If space gets above this value (it will round up to the next nearest
1185+# segment multiple), Cassandra will flush every dirty CF in the oldest
1186+# segment and remove it.
1187+# commitlog_total_space_in_mb: 4096
1188+
1189 # This sets the amount of memtable flush writer threads. These will
1190 # be blocked by disk io, and each one will hold a memtable in memory
1191 # while blocked. If you have a large heap and many data directories,
1192@@ -155,6 +164,10 @@
1193 # TCP port, for commands and data
1194 storage_port: 7000
1195
1196+# SSL port, for encrypted communication. Unused unless enabled in
1197+# encryption_options
1198+ssl_storage_port: 7001
1199+
1200 # Address to bind to and tell other Cassandra nodes to connect to. You
1201 # _must_ change this if you want multiple nodes to be able to
1202 # communicate!
1203@@ -167,29 +180,53 @@
1204 # Setting this to 0.0.0.0 is always wrong.
1205 listen_address: APPSCALE-LOCAL
1206
1207+# Address to broadcast to other Cassandra nodes
1208+# Leaving this blank will set it to the same value as listen_address
1209+# broadcast_address: 1.2.3.4
1210+
1211 # The address to bind the Thrift RPC service to -- clients connect
1212 # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
1213 # you want Thrift to listen on all interfaces.
1214 #
1215 # Leaving this blank has the same effect it does for ListenAddress,
1216 # (i.e. it will be based on the configured hostname of the node).
1217-rpc_address: APPSCALE-LOCAL
1218+rpc_address: 0.0.0.0
1219 # port for Thrift to listen for clients on
1220 rpc_port: 9160
1221
1222 # enable or disable keepalive on rpc connections
1223 rpc_keepalive: true
1224
1225-# Cassandra uses thread-per-client for client RPC. This can
1226-# be expensive in memory used for thread stack for a large
1227-# enough number of clients. (Hence, connection pooling is
1228-# very, very strongly recommended.)
1229-#
1230+# Cassandra provides three options for the RPC Server:
1231+#
1232+# sync -> One connection per thread in the rpc pool (see below).
1233+# For a very large number of clients, memory will be your limiting
1234+# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
1235+# Connection pooling is very, very strongly recommended.
1236+#
1237+# async -> Nonblocking server implementation with one thread to serve
1238+# rpc connections. This is not recommended for high throughput use
1239+# cases. Async has been tested to be about 50% slower than sync
1240+# or hsha and is deprecated: it will be removed in the next major release.
1241+#
1242+# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
1243+# (see below) is used to manage requests, but the threads are multiplexed
1244+# across the different clients.
1245+#
1246+# The default is sync because on Windows hsha is about 30% slower. On Linux,
1247+# sync/hsha performance is about the same, with hsha of course using less memory.
1248+rpc_server_type: sync
1249+
1250 # Uncomment rpc_min|max|thread to set request pool size.
1251-# You would primarily set max as a safeguard against misbehaved
1252-# clients; if you do hit the max, Cassandra will block until
1253-# one disconnects before accepting more. The defaults are
1254-# min of 16 and max unlimited.
1255+# You would primarily set max for the sync server to safeguard against
1256+# misbehaved clients; if you do hit the max, Cassandra will block until one
1257+# disconnects before accepting more. The defaults for sync are min of 16 and max
1258+# unlimited.
1259+#
1260+# For the Hsha server, the min and max both default to quadruple the number of
1261+# CPU cores.
1262+#
1263+# This configuration is ignored by the async server.
1264 #
1265 # rpc_min_threads: 16
1266 # rpc_max_threads: 2048
1267@@ -219,10 +256,6 @@
1268 # is a data format change.
1269 snapshot_before_compaction: false
1270
1271-# change this to increase the compaction thread's priority. In java, 1 is the
1272-# lowest priority and that is our default.
1273-# compaction_thread_priority: 1
1274-
1275 # Add column indexes to a row after its contents reach this size.
1276 # Increase if your column values are large, or if you have a very large
1277 # number of columns. The competing causes are, Cassandra has to
1278@@ -237,11 +270,48 @@
1279 # will be logged specifying the row key.
1280 in_memory_compaction_limit_in_mb: 64
1281
1282+# Number of simultaneous compactions to allow, NOT including
1283+# validation "compactions" for anti-entropy repair. Simultaneous
1284+# compactions can help preserve read performance in a mixed read/write
1285+# workload, by mitigating the tendency of small sstables to accumulate
1286+# during a single long running compaction. The default is usually
1287+# fine and if you experience problems with compaction running too
1288+# slowly or too fast, you should look at
1289+# compaction_throughput_mb_per_sec first.
1290+#
1291+# This setting has no effect on LeveledCompactionStrategy.
1292+#
1293+# concurrent_compactors defaults to the number of cores.
1294+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
1295+#concurrent_compactors: 1
1296+
1297+# Multi-threaded compaction. When enabled, each compaction will use
1298+# up to one thread per core, plus one thread per sstable being merged.
1299+# This is usually only useful for SSD-based hardware: otherwise,
1300+# your concern is usually to get compaction to do LESS i/o (see:
1301+# compaction_throughput_mb_per_sec), not more.
1302+multithreaded_compaction: false
1303+
1304+# Throttles compaction to the given total throughput across the entire
1305+# system. The faster you insert data, the faster you need to compact in
1306+# order to keep the sstable count down, but in general, setting this to
1307+# 16 to 32 times the rate you are inserting data is more than sufficient.
1308+# Setting this to 0 disables throttling. Note that this account for all types
1309+# of compaction, including validation compaction.
1310+compaction_throughput_mb_per_sec: 16
1311+
1312 # Track cached row keys during compaction, and re-cache their new
1313 # positions in the compacted sstable. Disable if you use really large
1314 # key caches.
1315 compaction_preheat_key_cache: true
1316
1317+# Throttles all outbound streaming file transfers on this node to the
1318+# given total throughput in Mbps. This is necessary because Cassandra does
1319+# mostly sequential IO when streaming data during bootstrap or repair, which
1320+# can lead to saturating the network connection and degrading rpc performance.
1321+# When unset, the default is 400 Mbps or 50 MB/s.
1322+# stream_throughput_outbound_megabits_per_sec: 400
1323+
1324 # Time to wait for a reply from other nodes before failing the command
1325 rpc_timeout_in_ms: 10000
1326
1327@@ -265,11 +335,6 @@
1328 # explicitly configured in cassandra-topology.properties.
1329 endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
1330
1331-# dynamic_snitch -- This boolean controls whether the above snitch is
1332-# wrapped with a dynamic snitch, which will monitor read latencies
1333-# and avoid reading from hosts that have slowed (due to compaction,
1334-# for instance)
1335-dynamic_snitch: true
1336 # controls how often to perform the more expensive part of host score
1337 # calculation
1338 dynamic_snitch_update_interval_in_ms: 100
1339@@ -283,7 +348,7 @@
1340 # expressed as a double which represents a percentage. Thus, a value of
1341 # 0.2 means Cassandra would continue to prefer the static snitch values
1342 # until the pinned host was 20% worse than the fastest.
1343-dynamic_snitch_badness_threshold: 0.0
1344+dynamic_snitch_badness_threshold: 0.1
1345
1346 # request_scheduler -- Set this to a class that implements
1347 # RequestScheduler, which will schedule incoming client requests
1348@@ -325,32 +390,40 @@
1349 # the request scheduling. Currently the only valid option is keyspace.
1350 # request_scheduler_id: keyspace
1351
1352-# The Index Interval determines how large the sampling of row keys
1353-# is for a given SSTable. The larger the sampling, the more effective
1354-# the index is at the cost of space.
1355+# index_interval controls the sampling of entries from the primary
1356+# row index in terms of space versus time. The larger the interval,
1357+# the smaller and less effective the sampling will be. In technical
1358+# terms, the interval corresponds to the number of index entries that
1359+# are skipped between taking each sample. All the sampled entries
1360+# must fit in memory. Generally, a value between 128 and 512 here
1361+# coupled with a large key cache size on CFs results in the best trade
1362+# offs. This value is not often changed, however if you have many
1363+# very small rows (many to an OS page), then increasing this will
1364+# often lower memory usage without an impact on performance.
1365 index_interval: 128
1366
1367-
1368-#keyspaces:
1369-#- column_families:
1370-# - column_type: Standard
1371-# name: Standard1
1372-# - column_type: Standard
1373-# name: Standard2
1374-# - column_type: Standard
1375-# compare_with: org.apache.cassandra.db.marshal.TimeUUIDType
1376-# name: StandardByTime1
1377-# - column_type: Standard
1378-# compare_with: org.apache.cassandra.db.marshal.TimeUUIDType
1379-# name: StandardByTime2
1380-# - column_type: Super
1381-# name: Super1
1382-# - column_type: Super
1383-# name: Super2
1384-#
1385-# name: Keyspace1
1386-# replica_placement_strategy: org.apache.cassandra.locator.SimpleStrategy
1387-# replica_placement_factor: REPLICATION
1388-#
1389-#
1390-
1391+# Enable or disable inter-node encryption
1392+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
1393+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
1394+# suite for authentication, key exchange and encryption of the actual data transfers.
1395+# NOTE: No custom encryption options are enabled at the moment
1396+# The available internode options are : all, none, dc, rack
1397+#
1398+# If set to dc cassandra will encrypt the traffic between the DCs
1399+# If set to rack cassandra will encrypt the traffic between the racks
1400+#
1401+# The passwords used in these options must match the passwords used when generating
1402+# the keystore and truststore. For instructions on generating these files, see:
1403+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
1404+#
1405+encryption_options:
1406+ internode_encryption: none
1407+ keystore: conf/.keystore
1408+ keystore_password: cassandra
1409+ truststore: conf/.truststore
1410+ truststore_password: cassandra
1411+ # More advanced defaults below:
1412+ # protocol: TLS
1413+ # algorithm: SunX509
1414+ # store_type: JKS
1415+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
1416
1417=== modified file 'AppDB/cassandra/test_cassandra.py' (properties changed: +x to -x)
1418--- AppDB/cassandra/test_cassandra.py 2011-05-29 21:41:44 +0000
1419+++ AppDB/cassandra/test_cassandra.py 2012-02-16 06:02:21 +0000
1420@@ -1,286 +1,89 @@
1421-#
1422-# Cassandra Interface for AppScale
1423-# Rewritten by Navraj Chohan for using range queries
1424-# Modified by Chris Bunch for upgrade to Cassandra 0.50.0
1425-# on 2/17/10
1426-# Original author: suwanny@gmail.com
1427-
1428-import os,sys
1429-import time
1430-
1431-from thrift_cass.Cassandra import Client
1432-from thrift_cass.ttypes import *
1433-
1434-import string
1435-import base64 # base64 2009.04.16
1436-from dbconstants import *
1437-from dbinterface import *
1438-#import sqlalchemy.pool as pool
1439-import appscale_logger
1440-import pycassa
1441-from thrift import Thrift
1442-from thrift.transport import TSocket
1443-from thrift.transport import TTransport
1444-from thrift.protocol import TBinaryProtocol
1445-ERROR_DEFAULT = "DB_ERROR:" # ERROR_CASSANDRA
1446-# Store all schema information in a special table
1447-# If a table does not show up in this table, try a range query
1448-# to discover it's schema
1449-SCHEMA_TABLE = "__key__"
1450-SCHEMA_TABLE_SCHEMA = ['schema']
1451-# use 1 Table and 1 ColumnFamily in Cassandra
1452-MAIN_TABLE = "Keyspace1"
1453-COLUMN_FAMILY = "Standard1"
1454-
1455-PERSISTENT_CONNECTION = False
1456-PROFILING = False
1457-
1458-DEFAULT_HOST = "localhost"
1459-DEFAULT_PORT = 9160
1460-
1461-CONSISTENCY_ZERO = 0 # don't use this for reads
1462-CONSISTENCY_ONE = 1
1463-CONSISTENCY_QUORUM = 2
1464-CONSISTENCY_ALL = 5 # don't use this for reads (next version may fix this)
1465-
1466-MAX_ROW_COUNT = 10000000
1467-table_cache = {}
1468-class DatastoreProxy(AppDBInterface):
1469- def __init__(self, logger = appscale_logger.getLogger("datastore-cassandra")):
1470- f = open(APPSCALE_HOME + '/.appscale/my_private_ip', 'r')
1471- self.host = DEFAULT_HOST
1472- self.port = DEFAULT_PORT
1473- #self.pool = pool.QueuePool(self.__create_connection, reset_on_return=False)
1474- #connection.add_pool('AppScale', [self.host, self.port])
1475- self.pool = pycassa.ConnectionPool(keyspace='Keyspace1', server_list=[self.host+":"+str(self.port)], prefill=False)
1476- self.logger = logger
1477-
1478- def logTiming(self, function, start_time, end_time):
1479- if PROFILING:
1480- self.logger.debug(function + ": " + str(end_time - start_time) + " s")
1481-
1482- def get_entity(self, table_name, row_key, column_names):
1483- error = [ERROR_DEFAULT]
1484- list = error
1485- client = None
1486- row_key = table_name + '/' + row_key
1487- try:
1488- slice_predicate = SlicePredicate(column_names=column_names)
1489- path = ColumnPath(COLUMN_FAMILY)
1490- client = self.__setup_connection()
1491- # Result is a column type which has name, value, timestamp
1492- result = client.get_slice(row_key, path, slice_predicate,
1493- CONSISTENCY_QUORUM)
1494- for column in column_names:
1495- for r in result:
1496- c = r.column
1497- if column == c.name:
1498- list.append(c.value)
1499- except NotFoundException: # occurs normally if the item isn't in the db
1500- list[0] += "Not found"
1501- self.__close_connection(client)
1502- return list
1503- except Exception, ex:
1504- #self.logger.debug("Exception %s" % ex)
1505- list[0]+=("Exception: %s"%ex)
1506- self.__close_connection(client)
1507- return list
1508- self.__close_connection(client)
1509- if len(list) == 1:
1510- list[0] += "Not found"
1511- return list
1512-
1513-
1514- def put_entity(self, table_name, row_key, column_names, cell_values):
1515- error = [ERROR_DEFAULT]
1516- list = error
1517- client = None
1518-
1519- # The first time a table is seen
1520- if table_name not in table_cache:
1521- self.create_table(table_name, column_names)
1522-
1523- row_key = table_name + '/' + row_key
1524- client = self.__setup_connection()
1525- curtime = self.timestamp()
1526- # Result is a column type which has name, value, timestamp
1527- mutations = []
1528- for index, ii in enumerate(column_names):
1529- column = Column(name = ii, value=cell_values[index],
1530- timestamp=curtime)
1531- c_or_sc = ColumnOrSuperColumn(column=column)
1532- mutation = Mutation(column_or_supercolumn=c_or_sc)
1533- mutations.append(mutation)
1534- mutation_map = {row_key : { COLUMN_FAMILY : mutations } }
1535- client.batch_mutate(mutation_map, CONSISTENCY_QUORUM)
1536- """except Exception, ex:
1537- print "EXCEPTION"
1538- self.logger.debug("Exception %s" % ex)
1539- list[0]+=("Exception: %s"%ex)
1540- self.__close_connection(client)
1541- list.append("0")
1542- return list
1543- """
1544- self.__close_connection(client)
1545- list.append("0")
1546- return list
1547-
1548- def put_entity_dict(self, table_name, row_key, value_dict):
1549- raise NotImplementedError("put_entity_dict is not implemented in %s." % self.__class__)
1550-
1551-
1552- def get_table(self, table_name, column_names):
1553- error = [ERROR_DEFAULT]
1554- client = None
1555- result = error
1556- keyslices = []
1557- column_parent = ColumnParent(column_family="Standard1")
1558- predicate = SlicePredicate(column_names=column_names)
1559- start_key = table_name + "/"
1560- end_key = table_name + '/~'
1561- try:
1562- client = self.__setup_connection()
1563- keyslices = client.get_range_slice(column_parent,
1564- predicate,
1565- start_key,
1566- end_key,
1567- MAX_ROW_COUNT,
1568- CONSISTENCY_QUORUM)
1569- except Exception, ex:
1570- self.logger.debug("Exception %s" % ex)
1571- result[0] += "Exception: " + str(ex)
1572- self.__close_connection(client)
1573- return result
1574- for keyslice in keyslices:
1575- ordering_dict = {}
1576- for c in keyslice.columns:
1577- column = c.column
1578- value = column.value
1579- ordering_dict[column.name] = value
1580- if len(ordering_dict) == 0:
1581- continue
1582- for column in column_names:
1583- try:
1584- result.append(ordering_dict[column])
1585- except:
1586- result[0] += "Key error, get_table did not return the correct schema"
1587- self.__close_connection(client)
1588- return result
1589-
1590- def delete_row(self, table_name, row_key):
1591- error = [ERROR_DEFAULT]
1592- ret = error
1593- client = None
1594- row_key = table_name + '/' + row_key
1595- path = ColumnPath(COLUMN_FAMILY)
1596- try:
1597- client = self.__setup_connection()
1598- curtime = self.timestamp()
1599- # Result is a column type which has name, value, timestamp
1600- client.remove(row_key, path, curtime,
1601- CONSISTENCY_QUORUM)
1602- except Exception, ex:
1603- self.logger.debug("Exception %s" % ex)
1604- ret[0]+=("Exception: %s"%ex)
1605- self.__close_connection(client)
1606- return ret
1607- self.__close_connection(client)
1608- ret.append("0")
1609- return ret
1610-
1611- def get_schema(self, table_name):
1612- error = [ERROR_DEFAULT]
1613- result = error
1614- ret = self.get_entity(SCHEMA_TABLE,
1615- table_name,
1616- SCHEMA_TABLE_SCHEMA)
1617- if len(ret) > 1:
1618- schema = ret[1]
1619- else:
1620- error[0] = ret[0] + "--unable to get schema"
1621- return error
1622- schema = schema.split(':')
1623- result = result + schema
1624- return result
1625-
1626-
1627- def delete_table(self, table_name):
1628- error = [ERROR_DEFAULT]
1629- result = error
1630- keyslices = []
1631- column_parent = ColumnParent(column_family="Standard1")
1632- predicate = SlicePredicate(column_names=[])
1633- curtime = self.timestamp()
1634- path = ColumnPath(COLUMN_FAMILY)
1635- start_key = table_name + "/"
1636- end_key = table_name + '/~'
1637- try:
1638- client = self.__setup_connection()
1639- keyslices = client.get_range_slice(column_parent,
1640- predicate,
1641- start_key,
1642- end_key,
1643- MAX_ROW_COUNT,
1644- CONSISTENCY_QUORUM)
1645- except Exception, ex:
1646- self.logger.debug("Exception %s" % ex)
1647- result[0]+=("Exception: %s"%ex)
1648- self.__close_connection(client)
1649- return result
1650- keys_removed = False
1651- for keyslice in keyslices:
1652- row_key = keyslice.key
1653- client.remove( row_key,
1654- path,
1655- curtime,
1656- CONSISTENCY_QUORUM)
1657- keys_removed = True
1658- if table_name not in table_cache and keys_removed:
1659- result[0] += "Table does not exist"
1660- return result
1661- if table_name in table_cache:
1662- del table_cache[table_name]
1663-
1664- self.__close_connection(client)
1665- return result
1666-
1667- # Only stores the schema
1668- def create_table(self, table_name, column_names):
1669- table_cache[table_name] = 1
1670- columns = ':'.join(column_names)
1671- row_key = table_name
1672- # Get and make sure we are not overwriting previous schemas
1673- ret = self.get_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA)
1674- if ret[0] != ERROR_DEFAULT:
1675- self.put_entity(SCHEMA_TABLE, row_key, SCHEMA_TABLE_SCHEMA, [columns])
1676-
1677- ######################################################################
1678- # private methods
1679- ######################################################################
1680- def __create_connection(self):
1681- socket = TSocket.TSocket(self.host, self.port)
1682- transport = TTransport.TBufferedTransport(socket)
1683- protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
1684- client = Client(protocol)
1685- client.transport = transport
1686- client.transport.open()
1687- #keyspace = KsDef(name=MAIN_TABLE, strategy_class="org.pache.cassandra.locator.SimpleStrategy", replication_factor=1, cf_defs=cfams)
1688- #client.system_add_keyspace(MAIN_TABLE)
1689- client.set_keyspace(MAIN_TABLE)
1690-
1691- return client
1692-
1693- def __get_connection(self):
1694- return connection.get_pool("AppScale")
1695-
1696- def __setup_connection(self):
1697- return self.pool.get()
1698- #return connection.get_pool("AppScale")
1699- #return self.pool.connect()
1700-
1701- def __close_connection(self, client):
1702- if client:
1703- client.close()
1704-
1705- def timestamp(self):
1706- return int(time.time() * 1000)
1707+import py_cassandra
1708+
1709+py_cassandra = py_cassandra.DatastoreProxy()
1710+
1711+columns = ["a","b","c"]
1712+data = ["1","2","3"]
1713+table_name = "hello"
1714+key = "1"
1715+print "key= " + key
1716+print "columns= " + str(columns)
1717+print "data= " + str(data)
1718+print "table= " + table_name
1719+#print py_cassandra.put_entity("__hi__", key, columns, data)
1720+#print py_cassandra.put_entity("__hah__", key, columns, data)
1721+#exit(0)
1722+print py_cassandra.put_entity(table_name, key, columns, data)
1723+ret = py_cassandra.get_entity(table_name, key, columns)
1724+print "doing a put then get"
1725+print ret
1726+if ret[1:] != data:
1727+ print "ERROR doing a put then get. Data does not match"
1728+ print "returned: " + str(ret)
1729+ print "expected: " + str(data)
1730+ exit(1)
1731+else:
1732+ print "Success"
1733+
1734+ret = py_cassandra.get_schema("hello")
1735+print ret
1736+print "checking schema:"
1737+print ret
1738+if ret[1:] != columns:
1739+ print "ERROR in recieved schema"
1740+ print "returned: " + str(ret)
1741+ print "expected: " + str(columns)
1742+
1743+ret = py_cassandra.delete_row(table_name, key)
1744+print "Deleting the key %s"%key
1745+print ret
1746+
1747+ret = py_cassandra.get_entity(table_name, key, columns)
1748+print "Trying to get deleted key:"
1749+print ret
1750+print "doing a put with key %s"%key
1751+print py_cassandra.put_entity("hello", "1", ["a","b","c"], ["1","2","3"])
1752+print "doing a get table"
1753+print py_cassandra.get_table("hello", ["a","b","c"])
1754+py_cassandra.put_entity("hello", "2", ["a","b","c"], ["4","5","6"])
1755+print "doing get table:"
1756+print py_cassandra.get_table("hello", ["a","b","c"])
1757+py_cassandra.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
1758+py_cassandra.get_table("hello", ["a","b","c"])
1759+
1760+print "TRYING TO REPLACE KEY 3"
1761+py_cassandra.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
1762+print "TRYING TO REPLACE KEY 3"
1763+py_cassandra.get_table("hello", ["a","b","c"])
1764+print "TRYING TO REPLACE KEY 3"
1765+ret = py_cassandra.delete_row("hello", "1")
1766+print "TRYING TO REPLACE KEY 3"
1767+ret = py_cassandra.delete_row("hello", "2")
1768+print "TRYING TO REPLACE KEY 3"
1769+ret = py_cassandra.delete_row("hello", "3")
1770+print "TRYING TO REPLACE KEY 3"
1771+py_cassandra.get_table("hello", ["a","b","c"])
1772+print "Deleting table:"
1773+print py_cassandra.delete_table("hello")
1774+print "deleting twice:"
1775+print py_cassandra.delete_table("hello")
1776+
1777+table_name = u"testing_query"
1778+print py_cassandra.delete_table(table_name)
1779+column_names = [u"c1"]
1780+limit = 1000
1781+offset = 0
1782+key = 0
1783+startrow = u"000"
1784+endrow = u"100"
1785+data = u"xxx"
1786+for ii in range(0, 101):
1787+ key = str(ii)
1788+ key = ("0" * (3 - len(key))) + key
1789+ key = unicode(key)
1790+ print "Adding key " + key
1791+ print py_cassandra.put_entity(table_name, key, column_names, [data + key])
1792+inclusive = 1
1793+notJustKeys = 0
1794+print "SUCCESS"
1795+
1796
1797=== modified file 'AppDB/hadoop/hadoop_helper.rb'
1798--- AppDB/hadoop/hadoop_helper.rb 2010-12-09 21:17:29 +0000
1799+++ AppDB/hadoop/hadoop_helper.rb 2012-02-16 06:02:21 +0000
1800@@ -1,5 +1,5 @@
1801 require 'djinn'
1802-HADOOP_VER = "0.20.2"
1803+HADOOP_VER = "0.20.2-cdh3u3"
1804 HADOOP_LOC = "#{APPSCALE_HOME}/AppDB/hadoop-" + HADOOP_VER
1805 HDFS_PORT = 9000
1806 ENABLE_HADOOP_SINGLE_NODE = true
1807
1808=== modified file 'AppDB/hadoop/patch/hadoop-hbase.patch'
1809--- AppDB/hadoop/patch/hadoop-hbase.patch 2010-04-08 00:25:36 +0000
1810+++ AppDB/hadoop/patch/hadoop-hbase.patch 2012-02-16 06:02:21 +0000
1811@@ -1,18 +1,20 @@
1812 *** src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java.org 2009-04-08 22:15:30.000000000 -0700
1813 --- src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java 2010-04-07 17:12:19.000000000 -0700
1814 ***************
1815-*** 20,26 ****
1816+*** 20,27 ****
1817
1818 import java.io.*;
1819 import java.net.*;
1820+ import java.util.ArrayList;
1821 !
1822 import org.apache.hadoop.fs.permission.FsPermission;
1823 import org.apache.hadoop.fs.*;
1824 import org.apache.hadoop.conf.Configuration;
1825---- 20,26 ----
1826+--- 20,27 ----
1827
1828 import java.io.*;
1829 import java.net.*;
1830+ import java.util.ArrayList;
1831 ! import org.apache.hadoop.util.*;
1832 import org.apache.hadoop.fs.permission.FsPermission;
1833 import org.apache.hadoop.fs.*;
1834
1835=== added file 'AppDB/hadoop/templates/hadoop'
1836--- AppDB/hadoop/templates/hadoop 1970-01-01 00:00:00 +0000
1837+++ AppDB/hadoop/templates/hadoop 2012-02-16 06:02:21 +0000
1838@@ -0,0 +1,468 @@
1839+#!/usr/bin/env bash
1840+# Licensed to the Apache Software Foundation (ASF) under one or more
1841+# contributor license agreements. See the NOTICE file distributed with
1842+# this work for additional information regarding copyright ownership.
1843+# The ASF licenses this file to You under the Apache License, Version 2.0
1844+# (the "License"); you may not use this file except in compliance with
1845+# the License. You may obtain a copy of the License at
1846+#
1847+# http://www.apache.org/licenses/LICENSE-2.0
1848+#
1849+# Unless required by applicable law or agreed to in writing, software
1850+# distributed under the License is distributed on an "AS IS" BASIS,
1851+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1852+# See the License for the specific language governing permissions and
1853+# limitations under the License.
1854+
1855+
1856+# The Hadoop command script
1857+#
1858+# Environment Variables
1859+#
1860+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
1861+#
1862+# HADOOP_CLASSPATH Extra Java CLASSPATH entries.
1863+#
1864+# HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
1865+# added in the beginning of the global
1866+# classpath. Can be defined, for example,
1867+# by doing
1868+# export HADOOP_USER_CLASSPATH_FIRST=true
1869+#
1870+# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
1871+# Default is 1000.
1872+#
1873+# HADOOP_OPTS Extra Java runtime options.
1874+#
1875+# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS
1876+# HADOOP_CLIENT_OPTS when the respective command is run.
1877+# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
1878+# for e.g. HADOOP_CLIENT_OPTS applies to
1879+# more than one command (fs, dfs, fsck,
1880+# dfsadmin etc)
1881+#
1882+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
1883+#
1884+# HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
1885+#
1886+
1887+bin=`dirname "$0"`
1888+bin=`cd "$bin"; pwd`
1889+
1890+. "$bin"/hadoop-config.sh
1891+
1892+HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
1893+
1894+cygwin=false
1895+case "`uname`" in
1896+CYGWIN*) cygwin=true;;
1897+esac
1898+
1899+# if no args specified, show usage
1900+if [ $# = 0 ]; then
1901+ echo "Usage: hadoop [--config confdir] COMMAND"
1902+ echo "where COMMAND is one of:"
1903+ echo " namenode -format format the DFS filesystem"
1904+ echo " secondarynamenode run the DFS secondary namenode"
1905+ echo " namenode run the DFS namenode"
1906+ echo " datanode run a DFS datanode"
1907+ echo " dfsadmin run a DFS admin client"
1908+ echo " mradmin run a Map-Reduce admin client"
1909+ echo " fsck run a DFS filesystem checking utility"
1910+ echo " fs run a generic filesystem user client"
1911+ echo " balancer run a cluster balancing utility"
1912+ echo " fetchdt fetch a delegation token from the NameNode"
1913+ echo " jobtracker run the MapReduce job Tracker node"
1914+ echo " pipes run a Pipes job"
1915+ echo " tasktracker run a MapReduce task Tracker node"
1916+ echo " job manipulate MapReduce jobs"
1917+ echo " queue get information regarding JobQueues"
1918+ echo " version print the version"
1919+ echo " jar <jar> run a jar file"
1920+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
1921+ echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
1922+ echo " oiv apply the offline fsimage viewer to an fsimage"
1923+ echo " classpath prints the class path needed to get the"
1924+ echo " dfsgroups get the groups which users belong to on the Name Node"
1925+ echo " mrgroups get the groups which users belong to on the Job Tracker"
1926+ echo " Hadoop jar and the required libraries"
1927+ echo " daemonlog get/set the log level for each daemon"
1928+ echo " or"
1929+ echo " CLASSNAME run the class named CLASSNAME"
1930+ echo "Most commands print help when invoked w/o parameters."
1931+ exit 1
1932+fi
1933+
1934+# get arguments
1935+COMMAND=$1
1936+shift
1937+
1938+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
1939+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
1940+fi
1941+
1942+# some Java parameters
1943+if [ "$JAVA_HOME" != "" ]; then
1944+ #echo "run java in $JAVA_HOME"
1945+ JAVA_HOME=$JAVA_HOME
1946+fi
1947+
1948+if [ "$JAVA_HOME" = "" ]; then
1949+ echo "Error: JAVA_HOME is not set."
1950+ exit 1
1951+fi
1952+
1953+JAVA=$JAVA_HOME/bin/java
1954+JAVA_HEAP_MAX=-Xmx1000m
1955+
1956+# check envvars which might override default args
1957+if [ "$HADOOP_HEAPSIZE" != "" ]; then
1958+ #echo "run with heapsize $HADOOP_HEAPSIZE"
1959+ JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
1960+ #echo $JAVA_HEAP_MAX
1961+fi
1962+
1963+# CLASSPATH initially contains $HADOOP_CONF_DIR
1964+CLASSPATH="${HADOOP_CONF_DIR}"
1965+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
1966+if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ] && [ "$HADOOP_CLASSPATH" != "" ] ; then
1967+ CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
1968+fi
1969+
1970+# for developers, add Hadoop classes to CLASSPATH
1971+if [ -d "$HADOOP_HOME/build/classes" ]; then
1972+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
1973+fi
1974+if [ -d "$HADOOP_HOME/build/webapps" ]; then
1975+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
1976+fi
1977+if [ -d "$HADOOP_HOME/build/test/classes" ]; then
1978+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
1979+fi
1980+if [ -d "$HADOOP_HOME/build/tools" ]; then
1981+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
1982+fi
1983+
1984+# so that filenames w/ spaces are handled correctly in loops below
1985+IFS=
1986+
1987+# for releases, add core hadoop jar & webapps to CLASSPATH
1988+if [ -d "$HADOOP_HOME/webapps" ]; then
1989+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME
1990+fi
1991+for f in $HADOOP_HOME/hadoop-core-*.jar; do
1992+ CLASSPATH=${CLASSPATH}:$f;
1993+done
1994+
1995+# add libs to CLASSPATH
1996+for f in $HADOOP_HOME/lib/*.jar; do
1997+ CLASSPATH=${CLASSPATH}:$f;
1998+done
1999+
2000+if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
2001+for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
2002+ CLASSPATH=${CLASSPATH}:$f;
2003+done
2004+fi
2005+
2006+for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
2007+ CLASSPATH=${CLASSPATH}:$f;
2008+done
2009+
2010+for f in $HADOOP_HOME/hadoop-tools-*.jar; do
2011+ TOOL_PATH=${TOOL_PATH}:$f;
2012+done
2013+for f in $HADOOP_HOME/build/hadoop-tools-*.jar; do
2014+ TOOL_PATH=${TOOL_PATH}:$f;
2015+done
2016+
2017+# add user-specified CLASSPATH last
2018+if [ "$HADOOP_USER_CLASSPATH_FIRST" = "" ] && [ "$HADOOP_CLASSPATH" != "" ]; then
2019+ CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
2020+fi
2021+
2022+# default log directory & file
2023+if [ "$HADOOP_LOG_DIR" = "" ]; then
2024+ HADOOP_LOG_DIR="$HADOOP_HOME/logs"
2025+fi
2026+if [ "$HADOOP_LOGFILE" = "" ]; then
2027+ HADOOP_LOGFILE='hadoop.log'
2028+fi
2029+
2030+# default policy file for service-level authorization
2031+if [ "$HADOOP_POLICYFILE" = "" ]; then
2032+ HADOOP_POLICYFILE="hadoop-policy.xml"
2033+fi
2034+
2035+# restore ordinary behaviour
2036+unset IFS
2037+
2038+# figure out which class to run
2039+if [ "$COMMAND" = "classpath" ] ; then
2040+ if $cygwin; then
2041+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
2042+ fi
2043+ echo $CLASSPATH
2044+ exit
2045+elif [ "$COMMAND" = "namenode" ] ; then
2046+ CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
2047+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
2048+elif [ "$COMMAND" = "secondarynamenode" ] ; then
2049+ CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
2050+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
2051+elif [ "$COMMAND" = "datanode" ] ; then
2052+ CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
2053+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS"
2054+elif [ "$COMMAND" = "fs" ] ; then
2055+ CLASS=org.apache.hadoop.fs.FsShell
2056+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2057+elif [ "$COMMAND" = "dfs" ] ; then
2058+ CLASS=org.apache.hadoop.fs.FsShell
2059+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2060+elif [ "$COMMAND" = "dfsadmin" ] ; then
2061+ CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
2062+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2063+elif [ "$COMMAND" = "mradmin" ] ; then
2064+ CLASS=org.apache.hadoop.mapred.tools.MRAdmin
2065+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2066+elif [ "$COMMAND" = "fsck" ] ; then
2067+ CLASS=org.apache.hadoop.hdfs.tools.DFSck
2068+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2069+elif [ "$COMMAND" = "balancer" ] ; then
2070+ CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
2071+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
2072+elif [ "$COMMAND" = "fetchdt" ] ; then
2073+ CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
2074+elif [ "$COMMAND" = "jobtracker" ] ; then
2075+ CLASS=org.apache.hadoop.mapred.JobTracker
2076+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS"
2077+elif [ "$COMMAND" = "tasktracker" ] ; then
2078+ CLASS=org.apache.hadoop.mapred.TaskTracker
2079+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS"
2080+elif [ "$COMMAND" = "job" ] ; then
2081+ CLASS=org.apache.hadoop.mapred.JobClient
2082+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2083+elif [ "$COMMAND" = "queue" ] ; then
2084+ CLASS=org.apache.hadoop.mapred.JobQueueClient
2085+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2086+elif [ "$COMMAND" = "pipes" ] ; then
2087+ CLASS=org.apache.hadoop.mapred.pipes.Submitter
2088+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2089+elif [ "$COMMAND" = "version" ] ; then
2090+ CLASS=org.apache.hadoop.util.VersionInfo
2091+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2092+elif [ "$COMMAND" = "jar" ] ; then
2093+ CLASS=org.apache.hadoop.util.RunJar
2094+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2095+elif [ "$COMMAND" = "distcp" ] ; then
2096+ CLASS=org.apache.hadoop.tools.DistCp
2097+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
2098+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2099+elif [ "$COMMAND" = "daemonlog" ] ; then
2100+ CLASS=org.apache.hadoop.log.LogLevel
2101+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2102+elif [ "$COMMAND" = "archive" ] ; then
2103+ CLASS=org.apache.hadoop.tools.HadoopArchives
2104+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
2105+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2106+elif [ "$COMMAND" = "oiv" ] ; then
2107+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
2108+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
2109+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2110+elif [ "$COMMAND" = "sampler" ] ; then
2111+ CLASS=org.apache.hadoop.mapred.lib.InputSampler
2112+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2113+elif [ "$COMMAND" = "dfsgroups" ] ; then
2114+ CLASS=org.apache.hadoop.hdfs.tools.GetGroups
2115+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2116+elif [ "$COMMAND" = "mrgroups" ] ; then
2117+ CLASS=org.apache.hadoop.mapred.tools.GetGroups
2118+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2119+elif [[ "$COMMAND" = -* ]] ; then
2120+ # class and package names cannot begin with a -
2121+ echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
2122+ exit 1
2123+else
2124+ CLASS=$COMMAND
2125+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
2126+fi
2127+
2128+# cygwin path translation
2129+if $cygwin; then
2130+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
2131+ HADOOP_HOME=`cygpath -w "$HADOOP_HOME"`
2132+ HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
2133+ TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
2134+ JAVA_LIBRARY_PATH=`cygpath -p -w "$JAVA_LIBRARY_PATH"`
2135+fi
2136+
2137+# setup 'java.library.path' for native-hadoop code if necessary
2138+if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" -o -d "${HADOOP_HOME}/sbin" ]; then
2139+ JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m ${HADOOP_JAVA_PLATFORM_OPTS} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
2140+
2141+ if [ -d "$HADOOP_HOME/build/native" ]; then
2142+ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
2143+ JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
2144+ else
2145+ JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
2146+ fi
2147+ fi
2148+
2149+ if [ -d "${HADOOP_HOME}/lib/native" ]; then
2150+ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
2151+ JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
2152+ else
2153+ JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
2154+ fi
2155+ fi
2156+
2157+ _JSVC_PATH=${HADOOP_HOME}/sbin/${JAVA_PLATFORM}/jsvc
2158+fi
2159+
2160+# cygwin path translation
2161+if $cygwin; then
2162+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
2163+fi
2164+
2165+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
2166+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
2167+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
2168+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
2169+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
2170+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
2171+ HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
2172+fi
2173+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
2174+
2175+
2176+###########################################################################
2177+# DAEMON SETTINGS
2178+###########################################################################
2179+# For any command that ends in 'node', we are starting one of the daemons.
2180+# In this case, we do some special processing in order to automatically
2181+# setuid to the correct user.
2182+#
2183+# The user itself is determined as one of the following, in descending
2184+# precedence:
2185+# HADOOP_<node>NODE_USER variable
2186+# the current userid, so long as that userid is not root
2187+#
2188+# After the above is determined, it is stored into the local variable
2189+# _HADOOP_DAEMON_USER
2190+#
2191+# We also need to determine the "run mode". This can be one of the following:
2192+#
2193+# "jsvc" - only supported for the datanode - we use the jsvc wrapper in
2194+# the sbin/<platform> directory in order to setuid to the target
2195+# user. Requires that this script is running as root.
2196+# "su" - supported only when running as root and /bin/su exists.
2197+# Uses su in order to assume the identity of the daemon user.
2198+# "normal" - supported only when already running as the target user.
2199+###########################################################################
2200+if [[ "$COMMAND" == *node ]] || [[ "$COMMAND" == *tracker ]]; then
2201+ command_uc=$(echo $COMMAND| tr a-z A-Z)
2202+ user_var="HADOOP_${command_uc}_USER"
2203+ _HADOOP_DAEMON_USER=$(eval "echo \$$user_var")
2204+ _HADOOP_DAEMON_USER=${_HADOOP_DAEMON_USER:-$(id -un)}
2205+
2206+ if [ -z "$_HADOOP_DAEMON_USER" ]; then
2207+ echo Please specify a user to run the $COMMAND by setting $user_var
2208+ #exit 1
2209+ elif [ "$_HADOOP_DAEMON_USER" == "root" ]; then
2210+ echo May not run daemons as root. Please specify $user_var
2211+ #exit 1
2212+ fi
2213+
2214+ if [ "$EUID" = "0" ] ; then
2215+ if [ "$COMMAND" == "datanode" ] && [ -x "$_JSVC_PATH" ]; then
2216+ _HADOOP_RUN_MODE="jsvc"
2217+ elif [ -x /bin/su ]; then
2218+ _HADOOP_RUN_MODE="su"
2219+ else
2220+ echo "Daemon wants to run as $_HADOOP_DAEMON_USER but script is running as root"
2221+ echo "and su is not available."
2222+ #exit 1
2223+ fi
2224+ else
2225+ # We must be running as the user we want to run as, if we can't use jsvc or su
2226+ # to drop privileges
2227+ if [ "$_HADOOP_DAEMON_USER" != "$(whoami)" ]; then
2228+ echo Daemon wants to run as $_HADOOP_DAEMON_USER but not running as that user or root.
2229+ #exit 1
2230+ fi
2231+ _HADOOP_RUN_MODE="normal"
2232+ fi
2233+else
2234+ # Normal client command
2235+ _HADOOP_RUN_MODE="normal"
2236+fi
2237+
2238+###########################################################################
2239+# Actually run the JVM
2240+###########################################################################
2241+case "$_HADOOP_RUN_MODE" in
2242+ jsvc)
2243+ case "$COMMAND" in
2244+ datanode)
2245+ _JSVC_STARTER_CLASS=org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter
2246+ ;;
2247+ *)
2248+ echo "Cannot start $COMMAND with jsvc"
2249+ exit 1
2250+ ;;
2251+ esac
2252+
2253+ if [ "$_HADOOP_DAEMON_DETACHED" = "true" ]; then
2254+ _JSVC_FLAGS="-pidfile $_HADOOP_DAEMON_PIDFILE
2255+ -errfile &1
2256+ -outfile $_HADOOP_DAEMON_OUT"
2257+ else
2258+ # Even though we are trying to run a non-detached datanode,
2259+ # jsvc will not write to stdout/stderr, so we have to pipe
2260+ # it and tail the logfile.
2261+ _JSVC_FLAGS="-nodetach
2262+ -errfile &1
2263+ -outfile $HADOOP_LOG_DIR/jsvc.out"
2264+ echo Non-detached jsvc output piping to: $HADOOP_LOG_DIR/jsvc.out
2265+ touch $HADOOP_LOG_DIR/jsvc.out
2266+ tail -f $HADOOP_LOG_DIR/jsvc.out &
2267+ fi
2268+ unset _HADOOP_DAEMON_DETACHED
2269+
2270+ exec "$_JSVC_PATH" -Dproc_$COMMAND \
2271+ $_JSVC_FLAGS \
2272+ -user "$_HADOOP_DAEMON_USER" \
2273+ -cp "$CLASSPATH" \
2274+ $JAVA_HEAP_MAX $HADOOP_OPTS \
2275+ $_JSVC_STARTER_CLASS "$@"
2276+ ;;
2277+
2278+ normal | su)
2279+ # If we need to su, tack the command into a local variable
2280+ if [ $_HADOOP_RUN_MODE = "su" ]; then
2281+ _JAVA_EXEC="su $_HADOOP_DAEMON_USER -s $JAVA --"
2282+ else
2283+ _JAVA_EXEC="$JAVA"
2284+ fi
2285+
2286+ if [ "$_HADOOP_DAEMON_DETACHED" = "true" ]; then
2287+ unset _HADOOP_DAEMON_DETACHED
2288+ touch $_HADOOP_DAEMON_OUT
2289+ nohup $_JAVA_EXEC -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" > "$_HADOOP_DAEMON_OUT" 2>&1 < /dev/null &
2290+ if [ "$EUID" == "0" ]; then
2291+ chown $_HADOOP_DAEMON_USER $_HADOOP_DAEMON_OUT
2292+ fi
2293+ echo $! > "$_HADOOP_DAEMON_PIDFILE"
2294+ sleep 1
2295+ head "$_HADOOP_DAEMON_OUT"
2296+ else
2297+ # For normal operation, just run the command
2298+ exec $_JAVA_EXEC -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
2299+ fi
2300+ ;;
2301+
2302+ *)
2303+ echo Bad run mode: $_HADOOP_RUN_MODE
2304+ exit 1
2305+ ;;
2306+esac
2307
2308=== modified file 'AppDB/hbase/hbase-status.sh'
2309--- AppDB/hbase/hbase-status.sh 2010-11-18 09:50:11 +0000
2310+++ AppDB/hbase/hbase-status.sh 2012-02-16 06:02:21 +0000
2311@@ -1,6 +1,6 @@
2312 #!/bin/sh
2313
2314-val=`echo "status 'summary'" | ${APPSCALE_HOME}/AppDB/hbase/hbase-0.89.20100924/bin/hbase shell | mawk '/load$/{print $1}'`
2315+val=`echo "status 'summary'" | ${APPSCALE_HOME}/AppDB/hbase/hbase-0.90.4-cdh3u3/bin/hbase shell | mawk '/load$/{print $1}'`
2316 if [ -z "$val" ]; then
2317 echo "0"
2318 else
2319
2320=== modified file 'AppDB/hbase/hbase_helper.rb'
2321--- AppDB/hbase/hbase_helper.rb 2010-12-09 21:17:29 +0000
2322+++ AppDB/hbase/hbase_helper.rb 2012-02-16 06:02:21 +0000
2323@@ -3,7 +3,7 @@
2324 require 'helperfunctions'
2325 require "#{APPSCALE_HOME}/AppDB/hadoop/hadoop_helper"
2326
2327-HBASE_LOC = "#{APPSCALE_HOME}/AppDB/hbase/hbase-0.89.20100924"
2328+HBASE_LOC = "#{APPSCALE_HOME}/AppDB/hbase/hbase-0.90.4-cdh3u3"
2329 THRIFT_PORT = 9090
2330 MASTER_SERVER_PORT = 60000
2331 ENABLE_SINGLE_NODE = true
2332
2333=== added file 'AppDB/hbase/patch/HMaster.java'
2334--- AppDB/hbase/patch/HMaster.java 1970-01-01 00:00:00 +0000
2335+++ AppDB/hbase/patch/HMaster.java 2012-02-16 06:02:21 +0000
2336@@ -0,0 +1,1171 @@
2337+/**
2338+ * Copyright 2011 The Apache Software Foundation
2339+ *
2340+ * Licensed to the Apache Software Foundation (ASF) under one
2341+ * or more contributor license agreements. See the NOTICE file
2342+ * distributed with this work for additional information
2343+ * regarding copyright ownership. The ASF licenses this file
2344+ * to you under the Apache License, Version 2.0 (the
2345+ * "License"); you may not use this file except in compliance
2346+ * with the License. You may obtain a copy of the License at
2347+ *
2348+ * http://www.apache.org/licenses/LICENSE-2.0
2349+ *
2350+ * Unless required by applicable law or agreed to in writing, software
2351+ * distributed under the License is distributed on an "AS IS" BASIS,
2352+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2353+ * See the License for the specific language governing permissions and
2354+ * limitations under the License.
2355+ */
2356+package org.apache.hadoop.hbase.master;
2357+
2358+import java.io.IOException;
2359+import java.lang.reflect.Constructor;
2360+import java.lang.reflect.InvocationTargetException;
2361+import java.net.InetSocketAddress;
2362+import java.net.UnknownHostException;
2363+import java.util.ArrayList;
2364+import java.util.HashMap;
2365+import java.util.List;
2366+import java.util.Map;
2367+import java.util.concurrent.atomic.AtomicReference;
2368+
2369+import org.apache.commons.logging.Log;
2370+import org.apache.commons.logging.LogFactory;
2371+import org.apache.hadoop.conf.Configuration;
2372+import org.apache.hadoop.hbase.Chore;
2373+import org.apache.hadoop.hbase.ClusterStatus;
2374+import org.apache.hadoop.hbase.HColumnDescriptor;
2375+import org.apache.hadoop.hbase.HConstants;
2376+import org.apache.hadoop.hbase.HMsg;
2377+import org.apache.hadoop.hbase.HRegionInfo;
2378+import org.apache.hadoop.hbase.HServerAddress;
2379+import org.apache.hadoop.hbase.HServerInfo;
2380+import org.apache.hadoop.hbase.HTableDescriptor;
2381+import org.apache.hadoop.hbase.MasterNotRunningException;
2382+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
2383+import org.apache.hadoop.hbase.Server;
2384+import org.apache.hadoop.hbase.TableExistsException;
2385+import org.apache.hadoop.hbase.TableNotDisabledException;
2386+import org.apache.hadoop.hbase.TableNotFoundException;
2387+import org.apache.hadoop.hbase.UnknownRegionException;
2388+import org.apache.hadoop.hbase.catalog.CatalogTracker;
2389+import org.apache.hadoop.hbase.catalog.MetaEditor;
2390+import org.apache.hadoop.hbase.catalog.MetaReader;
2391+import org.apache.hadoop.hbase.client.MetaScanner;
2392+import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
2393+import org.apache.hadoop.hbase.client.Result;
2394+import org.apache.hadoop.hbase.executor.ExecutorService;
2395+import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
2396+import org.apache.hadoop.hbase.ipc.HBaseRPC;
2397+import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
2398+import org.apache.hadoop.hbase.ipc.HBaseServer;
2399+import org.apache.hadoop.hbase.ipc.HMasterInterface;
2400+import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
2401+import org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan;
2402+import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
2403+import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
2404+import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
2405+import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
2406+import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
2407+import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
2408+import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
2409+import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
2410+import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
2411+import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
2412+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
2413+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
2414+import org.apache.hadoop.hbase.regionserver.HRegion;
2415+import org.apache.hadoop.hbase.regionserver.wal.HLog;
2416+import org.apache.hadoop.hbase.replication.regionserver.Replication;
2417+import org.apache.hadoop.hbase.security.User;
2418+import org.apache.hadoop.hbase.util.Bytes;
2419+import org.apache.hadoop.hbase.util.InfoServer;
2420+import org.apache.hadoop.hbase.util.Pair;
2421+import org.apache.hadoop.hbase.util.Sleeper;
2422+import org.apache.hadoop.hbase.util.Strings;
2423+import org.apache.hadoop.hbase.util.Threads;
2424+import org.apache.hadoop.hbase.util.VersionInfo;
2425+import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
2426+import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
2427+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
2428+import org.apache.hadoop.io.MapWritable;
2429+import org.apache.hadoop.io.Text;
2430+import org.apache.hadoop.net.DNS;
2431+import org.apache.zookeeper.KeeperException;
2432+import org.apache.zookeeper.Watcher;
2433+
2434+/**
2435+ * HMaster is the "master server" for HBase. An HBase cluster has one active
2436+ * master. If many masters are started, all compete. Whichever wins goes on to
2437+ * run the cluster. All others park themselves in their constructor until
2438+ * master or cluster shutdown or until the active master loses its lease in
2439+ * zookeeper. Thereafter, all running master jostle to take over master role.
2440+ *
2441+ * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}. In
2442+ * this case it will tell all regionservers to go down and then wait on them
2443+ * all reporting in that they are down. This master will then shut itself down.
2444+ *
2445+ * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
2446+ *
2447+ * @see HMasterInterface
2448+ * @see HMasterRegionInterface
2449+ * @see Watcher
2450+ */
2451+public class HMaster extends Thread
2452+implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
2453+ private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
2454+
2455+ // MASTER is name of the webapp and the attribute name used stuffing this
2456+ //instance into web context.
2457+ public static final String MASTER = "master";
2458+
2459+ // The configuration for the Master
2460+ private final Configuration conf;
2461+ // server for the web ui
2462+ private InfoServer infoServer;
2463+
2464+ // Our zk client.
2465+ private ZooKeeperWatcher zooKeeper;
2466+ // Manager and zk listener for master election
2467+ private ActiveMasterManager activeMasterManager;
2468+ // Region server tracker
2469+ private RegionServerTracker regionServerTracker;
2470+
2471+ // RPC server for the HMaster
2472+ private final HBaseServer rpcServer;
2473+ // Address of the HMaster
2474+ private final HServerAddress address;
2475+ // Metrics for the HMaster
2476+ private final MasterMetrics metrics;
2477+ // file system manager for the master FS operations
2478+ private MasterFileSystem fileSystemManager;
2479+
2480+ // server manager to deal with region server info
2481+ private ServerManager serverManager;
2482+
2483+ // manager of assignment nodes in zookeeper
2484+ AssignmentManager assignmentManager;
2485+ // manager of catalog regions
2486+ private CatalogTracker catalogTracker;
2487+ // Cluster status zk tracker and local setter
2488+ private ClusterStatusTracker clusterStatusTracker;
2489+
2490+ // buffer for "fatal error" notices from region servers
2491+ // in the cluster. This is only used for assisting
2492+ // operations/debugging.
2493+ private MemoryBoundedLogMessageBuffer rsFatals;
2494+
2495+ // This flag is for stopping this Master instance. Its set when we are
2496+ // stopping or aborting
2497+ private volatile boolean stopped = false;
2498+ // Set on abort -- usually failure of our zk session.
2499+ private volatile boolean abort = false;
2500+ // flag set after we become the active master (used for testing)
2501+ private volatile boolean isActiveMaster = false;
2502+ // flag set after we complete initialization once active (used for testing)
2503+ private volatile boolean initialized = false;
2504+
2505+ // Instance of the hbase executor service.
2506+ ExecutorService executorService;
2507+
2508+ private LoadBalancer balancer;
2509+ private Thread balancerChore;
2510+ // If 'true', the balancer is 'on'. If 'false', the balancer will not run.
2511+ private volatile boolean balanceSwitch = true;
2512+
2513+ private Thread catalogJanitorChore;
2514+ private LogCleaner logCleaner;
2515+
2516+ /**
2517+ * Initializes the HMaster. The steps are as follows:
2518+ * <p>
2519+ * <ol>
2520+ * <li>Initialize HMaster RPC and address
2521+ * <li>Connect to ZooKeeper.
2522+ * </ol>
2523+ * <p>
2524+ * Remaining steps of initialization occur in {@link #run()} so that they
2525+ * run in their own thread rather than within the context of the constructor.
2526+ * @throws InterruptedException
2527+ */
2528+ public HMaster(final Configuration conf)
2529+ throws IOException, KeeperException, InterruptedException {
2530+ this.conf = conf;
2531+
2532+ /*
2533+ * Determine address and initialize RPC server (but do not start).
2534+ * The RPC server ports can be ephemeral. Create a ZKW instance.
2535+ */
2536+ HServerAddress a = new HServerAddress(getMyAddress(this.conf));
2537+ int numHandlers = conf.getInt("hbase.regionserver.handler.count", 10);
2538+ this.rpcServer = HBaseRPC.getServer(this,
2539+ new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
2540+ a.getBindAddress(), a.getPort(),
2541+ numHandlers,
2542+ 0, // we dont use high priority handlers in master
2543+ false, conf,
2544+ 0); // this is a DNC w/o high priority handlers
2545+ this.address = new HServerAddress(rpcServer.getListenerAddress());
2546+
2547+ // initialize server principal (if using secure Hadoop)
2548+ User.login(conf, "hbase.master.keytab.file",
2549+ "hbase.master.kerberos.principal", this.address.getHostname());
2550+
2551+ // set the thread name now we have an address
2552+ setName(MASTER + "-" + this.address);
2553+
2554+ Replication.decorateMasterConfiguration(this.conf);
2555+
2556+ this.rpcServer.startThreads();
2557+
2558+ // Hack! Maps DFSClient => Master for logs. HDFS made this
2559+ // config param for task trackers, but we can piggyback off of it.
2560+ if (this.conf.get("mapred.task.id") == null) {
2561+ this.conf.set("mapred.task.id", "hb_m_" + this.address.toString() +
2562+ "_" + System.currentTimeMillis());
2563+ }
2564+
2565+ this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" +
2566+ address.getPort(), this);
2567+
2568+ this.metrics = new MasterMetrics(getServerName());
2569+ }
2570+
2571+ /**
2572+ * Stall startup if we are designated a backup master; i.e. we want someone
2573+ * else to become the master before proceeding.
2574+ * @param c
2575+ * @param amm
2576+ * @throws InterruptedException
2577+ */
2578+ private static void stallIfBackupMaster(final Configuration c,
2579+ final ActiveMasterManager amm)
2580+ throws InterruptedException {
2581+ // If we're a backup master, stall until a primary to writes his address
2582+ if (!c.getBoolean(HConstants.MASTER_TYPE_BACKUP,
2583+ HConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
2584+ return;
2585+ }
2586+ LOG.debug("HMaster started in backup mode. " +
2587+ "Stalling until master znode is written.");
2588+ // This will only be a minute or so while the cluster starts up,
2589+ // so don't worry about setting watches on the parent znode
2590+ while (!amm.isActiveMaster()) {
2591+ LOG.debug("Waiting for master address ZNode to be written " +
2592+ "(Also watching cluster state node)");
2593+ Thread.sleep(c.getInt("zookeeper.session.timeout", 180 * 1000));
2594+ }
2595+ }
2596+
2597+ /**
2598+ * Main processing loop for the HMaster.
2599+ * <ol>
2600+ * <li>Block until becoming active master
2601+ * <li>Finish initialization via {@link #finishInitialization()}
2602+ * <li>Enter loop until we are stopped
2603+ * <li>Stop services and perform cleanup once stopped
2604+ * </ol>
2605+ */
2606+ @Override
2607+ public void run() {
2608+ MonitoredTask startupStatus =
2609+ TaskMonitor.get().createStatus("Master startup");
2610+ startupStatus.setDescription("Master startup");
2611+ try {
2612+ /*
2613+ * Block on becoming the active master.
2614+ *
2615+ * We race with other masters to write our address into ZooKeeper. If we
2616+ * succeed, we are the primary/active master and finish initialization.
2617+ *
2618+ * If we do not succeed, there is another active master and we should
2619+ * now wait until it dies to try and become the next active master. If we
2620+ * do not succeed on our first attempt, this is no longer a cluster startup.
2621+ */
2622+ this.activeMasterManager = new ActiveMasterManager(zooKeeper, address,
2623+ this);
2624+ this.zooKeeper.registerListener(activeMasterManager);
2625+ stallIfBackupMaster(this.conf, this.activeMasterManager);
2626+ this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus);
2627+ // We are either the active master or we were asked to shutdown
2628+ if (!this.stopped) {
2629+ finishInitialization(startupStatus);
2630+ loop();
2631+ }
2632+ } catch (Throwable t) {
2633+ abort("Unhandled exception. Starting shutdown.", t);
2634+ } finally {
2635+ startupStatus.cleanup();
2636+
2637+ stopChores();
2638+ // Wait for all the remaining region servers to report in IFF we were
2639+ // running a cluster shutdown AND we were NOT aborting.
2640+ if (!this.abort && this.serverManager != null &&
2641+ this.serverManager.isClusterShutdown()) {
2642+ this.serverManager.letRegionServersShutdown();
2643+ }
2644+ stopServiceThreads();
2645+ // Stop services started for both backup and active masters
2646+ if (this.activeMasterManager != null) this.activeMasterManager.stop();
2647+ if (this.catalogTracker != null) this.catalogTracker.stop();
2648+ if (this.serverManager != null) this.serverManager.stop();
2649+ if (this.assignmentManager != null) this.assignmentManager.stop();
2650+ if (this.fileSystemManager != null) this.fileSystemManager.stop();
2651+ this.zooKeeper.close();
2652+ }
2653+ LOG.info("HMaster main thread exiting");
2654+ }
2655+
2656+ private void loop() {
2657+ // Check if we should stop every second.
2658+ Sleeper sleeper = new Sleeper(1000, this);
2659+ while (!this.stopped) {
2660+ sleeper.sleep();
2661+ }
2662+ }
2663+
2664+ /**
2665+ * Finish initialization of HMaster after becoming the primary master.
2666+ *
2667+ * <ol>
2668+ * <li>Initialize master components - file system manager, server manager,
2669+ * assignment manager, region server tracker, catalog tracker, etc</li>
2670+ * <li>Start necessary service threads - rpc server, info server,
2671+ * executor services, etc</li>
2672+ * <li>Set cluster as UP in ZooKeeper</li>
2673+ * <li>Wait for RegionServers to check-in</li>
2674+ * <li>Split logs and perform data recovery, if necessary</li>
2675+ * <li>Ensure assignment of root and meta regions</li>
2676+ * <li>Handle either fresh cluster start or master failover</li>
2677+ * </ol>
2678+ *
2679+ * @throws IOException
2680+ * @throws InterruptedException
2681+ * @throws KeeperException
2682+ */
2683+ private void finishInitialization(MonitoredTask status)
2684+ throws IOException, InterruptedException, KeeperException {
2685+
2686+ isActiveMaster = true;
2687+
2688+ /*
2689+ * We are active master now... go initialize components we need to run.
2690+ * Note, there may be dross in zk from previous runs; it'll get addressed
2691+ * below after we determine if cluster startup or failover.
2692+ */
2693+
2694+ status.setStatus("Initializing Master file system");
2695+ // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
2696+ this.fileSystemManager = new MasterFileSystem(this, metrics);
2697+ this.executorService = new ExecutorService(getServerName());
2698+ this.rsFatals = new MemoryBoundedLogMessageBuffer(
2699+ conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
2700+
2701+ this.serverManager = new ServerManager(this, this, metrics);
2702+
2703+ status.setStatus("Initializing ZK system trackers");
2704+ this.catalogTracker = new CatalogTracker(this.zooKeeper, this.conf,
2705+ this, conf.getInt("hbase.master.catalog.timeout", Integer.MAX_VALUE));
2706+ this.catalogTracker.start();
2707+
2708+ this.assignmentManager = new AssignmentManager(this, serverManager,
2709+ this.catalogTracker, this.executorService);
2710+ this.balancer = new LoadBalancer(conf);
2711+ zooKeeper.registerListenerFirst(assignmentManager);
2712+
2713+ this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
2714+ this.serverManager);
2715+ this.regionServerTracker.start();
2716+
2717+ // Set the cluster as up. If new RSs, they'll be waiting on this before
2718+ // going ahead with their startup.
2719+ this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
2720+ this.clusterStatusTracker.start();
2721+ boolean wasUp = this.clusterStatusTracker.isClusterUp();
2722+ if (!wasUp) this.clusterStatusTracker.setClusterUp();
2723+
2724+ LOG.info("Server active/primary master; " + this.address +
2725+ ", sessionid=0x" +
2726+ Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()) +
2727+ ", cluster-up flag was=" + wasUp);
2728+
2729+ // start up all service threads.
2730+ status.setStatus("Initializing master service threads");
2731+ startServiceThreads();
2732+
2733+ // Wait for region servers to report in. Returns count of regions.
2734+ int regionCount = this.serverManager.waitForRegionServers(status);
2735+
2736+ // TODO: Should do this in background rather than block master startup
2737+ status.setStatus("Splitting logs after master startup");
2738+ this.fileSystemManager.
2739+ splitLogAfterStartup(this.serverManager.getOnlineServers());
2740+
2741+ // Make sure root and meta assigned before proceeding.
2742+ assignRootAndMeta(status);
2743+
2744+ // Is this fresh start with no regions assigned or are we a master joining
2745+ // an already-running cluster? If regionsCount == 0, then for sure a
2746+ // fresh start. TODO: Be fancier. If regionsCount == 2, perhaps the
2747+ // 2 are .META. and -ROOT- and we should fall into the fresh startup
2748+ // branch below. For now, do processFailover.
2749+ if (regionCount == 0) {
2750+ LOG.info("Master startup proceeding: cluster startup");
2751+ this.assignmentManager.cleanoutUnassigned();
2752+ this.assignmentManager.assignAllUserRegions();
2753+ } else {
2754+ LOG.info("Master startup proceeding: master failover");
2755+ this.assignmentManager.processFailover();
2756+ }
2757+
2758+ // Fixing up missing daughters if any
2759+ status.setStatus("Fixing up missing daughters");
2760+ fixupDaughters(status);
2761+
2762+ // Start balancer and meta catalog janitor after meta and regions have
2763+ // been assigned.
2764+ status.setStatus("Starting balancer and catalog janitor");
2765+ this.balancerChore = getAndStartBalancerChore(this);
2766+ this.catalogJanitorChore =
2767+ Threads.setDaemonThreadRunning(new CatalogJanitor(this, this));
2768+
2769+ status.markComplete("Initialization successful");
2770+ LOG.info("Master has completed initialization");
2771+ initialized = true;
2772+ }
2773+
2774+ /**
2775+ * Check <code>-ROOT-</code> and <code>.META.</code> are assigned. If not,
2776+ * assign them.
2777+ * @throws InterruptedException
2778+ * @throws IOException
2779+ * @throws KeeperException
2780+ * @return Count of regions we assigned.
2781+ */
2782+ int assignRootAndMeta(MonitoredTask status)
2783+ throws InterruptedException, IOException, KeeperException {
2784+ int assigned = 0;
2785+ long timeout = this.conf.getLong("hbase.catalog.verification.timeout", 1000);
2786+
2787+ // Work on ROOT region. Is it in zk in transition?
2788+ status.setStatus("Assigning ROOT region");
2789+ boolean rit = this.assignmentManager.
2790+ processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.ROOT_REGIONINFO);
2791+ if (!catalogTracker.verifyRootRegionLocation(timeout)) {
2792+ this.assignmentManager.assignRoot();
2793+ this.catalogTracker.waitForRoot();
2794+ assigned++;
2795+ }
2796+ LOG.info("-ROOT- assigned=" + assigned + ", rit=" + rit +
2797+ ", location=" + catalogTracker.getRootLocation());
2798+
2799+ // Work on meta region
2800+ status.setStatus("Assigning META region");
2801+ rit = this.assignmentManager.
2802+ processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
2803+ if (!this.catalogTracker.verifyMetaRegionLocation(timeout)) {
2804+ this.assignmentManager.assignMeta();
2805+ this.catalogTracker.waitForMeta();
2806+ // Above check waits for general meta availability but this does not
2807+ // guarantee that the transition has completed
2808+ this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
2809+ assigned++;
2810+ }
2811+ LOG.info(".META. assigned=" + assigned + ", rit=" + rit +
2812+ ", location=" + catalogTracker.getMetaLocation());
2813+ status.setStatus("META and ROOT assigned.");
2814+ return assigned;
2815+ }
2816+
2817+ void fixupDaughters(final MonitoredTask status) throws IOException {
2818+ final Map<HRegionInfo, Result> offlineSplitParents =
2819+ new HashMap<HRegionInfo, Result>();
2820+ // This visitor collects offline split parents in the .META. table
2821+ MetaReader.Visitor visitor = new MetaReader.Visitor() {
2822+ @Override
2823+ public boolean visit(Result r) throws IOException {
2824+ if (r == null || r.isEmpty()) return true;
2825+ HRegionInfo info = CatalogJanitor.getHRegionInfo(r);
2826+ if (info == null) return true; // Keep scanning
2827+ if (info.isOffline() && info.isSplit()) {
2828+ offlineSplitParents.put(info, r);
2829+ }
2830+ // Returning true means "keep scanning"
2831+ return true;
2832+ }
2833+ };
2834+ // Run full scan of .META. catalog table passing in our custom visitor
2835+ MetaReader.fullScan(this.catalogTracker, visitor);
2836+ // Now work on our list of found parents. See if any we can clean up.
2837+ int fixups = 0;
2838+ for (Map.Entry<HRegionInfo, Result> e : offlineSplitParents.entrySet()) {
2839+ fixups += ServerShutdownHandler.fixupDaughters(
2840+ e.getValue(), assignmentManager, catalogTracker);
2841+ }
2842+ if (fixups != 0) {
2843+ LOG.info("Scanned the catalog and fixed up " + fixups +
2844+ " missing daughter region(s)");
2845+ }
2846+ }
2847+
2848+ /*
2849+ * @return This master's address.
2850+ * @throws UnknownHostException
2851+ */
2852+ private static String getMyAddress(final Configuration c)
2853+ throws UnknownHostException {
2854+ // Find out our address up in DNS.
2855+ String s = DNS.getDefaultIP(c.get("hbase.master.dns.interface","default"));
2856+ s += ":" + c.get(HConstants.MASTER_PORT,
2857+ Integer.toString(HConstants.DEFAULT_MASTER_PORT));
2858+ return s;
2859+ }
2860+
2861+ /** @return HServerAddress of the master server */
2862+ public HServerAddress getMasterAddress() {
2863+ return this.address;
2864+ }
2865+
2866+ public long getProtocolVersion(String protocol, long clientVersion) {
2867+ return HBaseRPCProtocolVersion.versionID;
2868+ }
2869+
2870+ /** @return InfoServer object. Maybe null.*/
2871+ public InfoServer getInfoServer() {
2872+ return this.infoServer;
2873+ }
2874+
2875+ @Override
2876+ public Configuration getConfiguration() {
2877+ return this.conf;
2878+ }
2879+
2880+ @Override
2881+ public ServerManager getServerManager() {
2882+ return this.serverManager;
2883+ }
2884+
2885+ @Override
2886+ public ExecutorService getExecutorService() {
2887+ return this.executorService;
2888+ }
2889+
2890+ @Override
2891+ public MasterFileSystem getMasterFileSystem() {
2892+ return this.fileSystemManager;
2893+ }
2894+
2895+ /**
2896+ * Get the ZK wrapper object - needed by master_jsp.java
2897+ * @return the zookeeper wrapper
2898+ */
2899+ public ZooKeeperWatcher getZooKeeperWatcher() {
2900+ return this.zooKeeper;
2901+ }
2902+
2903+ /*
2904+ * Start up all services. If any of these threads gets an unhandled exception
2905+ * then they just die with a logged message. This should be fine because
2906+ * in general, we do not expect the master to get such unhandled exceptions
2907+ * as OOMEs; it should be lightly loaded. See what HRegionServer does if
2908+ * need to install an unexpected exception handler.
2909+ */
2910+ private void startServiceThreads() throws IOException{
2911+
2912+ // Start the executor service pools
2913+ this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
2914+ conf.getInt("hbase.master.executor.openregion.threads", 5));
2915+ this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
2916+ conf.getInt("hbase.master.executor.closeregion.threads", 5));
2917+ this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
2918+ conf.getInt("hbase.master.executor.serverops.threads", 3));
2919+ this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
2920+ conf.getInt("hbase.master.executor.serverops.threads", 5));
2921+
2922+ // We depend on there being only one instance of this executor running
2923+ // at a time. To do concurrency, would need fencing of enable/disable of
2924+ // tables.
2925+ this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
2926+
2927+ // Start log cleaner thread
2928+ String n = Thread.currentThread().getName();
2929+ this.logCleaner =
2930+ new LogCleaner(conf.getInt("hbase.master.cleaner.interval", 60 * 1000),
2931+ this, conf, getMasterFileSystem().getFileSystem(),
2932+ getMasterFileSystem().getOldLogDir());
2933+ Threads.setDaemonThreadRunning(logCleaner, n + ".oldLogCleaner");
2934+
2935+ // Put up info server.
2936+ int port = this.conf.getInt("hbase.master.info.port", 60010);
2937+ if (port >= 0) {
2938+ String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
2939+ this.infoServer = new InfoServer(MASTER, a, port, false);
2940+ this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
2941+ this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
2942+ this.infoServer.setAttribute(MASTER, this);
2943+ this.infoServer.start();
2944+ }
2945+
2946+ // Start allowing requests to happen.
2947+ this.rpcServer.openServer();
2948+ if (LOG.isDebugEnabled()) {
2949+ LOG.debug("Started service threads");
2950+ }
2951+
2952+ }
2953+
2954+ private void stopServiceThreads() {
2955+ if (LOG.isDebugEnabled()) {
2956+ LOG.debug("Stopping service threads");
2957+ }
2958+ if (this.rpcServer != null) this.rpcServer.stop();
2959+ // Clean up and close up shop
2960+ if (this.logCleaner!= null) this.logCleaner.interrupt();
2961+ if (this.infoServer != null) {
2962+ LOG.info("Stopping infoServer");
2963+ try {
2964+ this.infoServer.stop();
2965+ } catch (Exception ex) {
2966+ ex.printStackTrace();
2967+ }
2968+ }
2969+ if (this.executorService != null) this.executorService.shutdown();
2970+ }
2971+
2972+ private static Thread getAndStartBalancerChore(final HMaster master) {
2973+ String name = master.getServerName() + "-BalancerChore";
2974+ int period = master.getConfiguration().getInt("hbase.balancer.period", 300000);
2975+ // Start up the load balancer chore
2976+ Chore chore = new Chore(name, period, master) {
2977+ @Override
2978+ protected void chore() {
2979+ master.balance();
2980+ }
2981+ };
2982+ return Threads.setDaemonThreadRunning(chore);
2983+ }
2984+
2985+ private void stopChores() {
2986+ if (this.balancerChore != null) {
2987+ this.balancerChore.interrupt();
2988+ }
2989+ if (this.catalogJanitorChore != null) {
2990+ this.catalogJanitorChore.interrupt();
2991+ }
2992+ }
2993+
2994+ @Override
2995+ public MapWritable regionServerStartup(final HServerInfo serverInfo,
2996+ final long serverCurrentTime)
2997+ throws IOException {
2998+ // Set the ip into the passed in serverInfo. Its ip is more than likely
2999+ // not the ip that the master sees here. See at end of this method where
3000+ // we pass it back to the regionserver by setting "hbase.regionserver.address"
3001+ // Everafter, the HSI combination 'server name' is what uniquely identifies
3002+ // the incoming RegionServer.
3003+ InetSocketAddress address = new InetSocketAddress(
3004+ HBaseServer.getRemoteIp().getHostName(),
3005+ serverInfo.getServerAddress().getPort());
3006+ serverInfo.setServerAddress(new HServerAddress(address));
3007+
3008+ // Register with server manager
3009+ this.serverManager.regionServerStartup(serverInfo, serverCurrentTime);
3010+ // Send back some config info
3011+ MapWritable mw = createConfigurationSubset();
3012+ mw.put(new Text("hbase.regionserver.address"),
3013+ serverInfo.getServerAddress());
3014+ return mw;
3015+ }
3016+
3017+ /**
3018+ * @return Subset of configuration to pass initializing regionservers: e.g.
3019+ * the filesystem to use and root directory to use.
3020+ */
3021+ protected MapWritable createConfigurationSubset() {
3022+ MapWritable mw = addConfig(new MapWritable(), HConstants.HBASE_DIR);
3023+ return addConfig(mw, "fs.default.name");
3024+ }
3025+
3026+ private MapWritable addConfig(final MapWritable mw, final String key) {
3027+ mw.put(new Text(key), new Text(this.conf.get(key)));
3028+ return mw;
3029+ }
3030+
3031+ @Override
3032+ public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[],
3033+ HRegionInfo[] mostLoadedRegions)
3034+ throws IOException {
3035+ return adornRegionServerAnswer(serverInfo,
3036+ this.serverManager.regionServerReport(serverInfo, msgs, mostLoadedRegions));
3037+ }
3038+
3039+ @Override
3040+ public void reportRSFatalError(HServerInfo serverInfo,
3041+ String errorText) {
3042+ String msg = "Region server " + serverInfo + " reported a fatal error:\n"
3043+ + errorText;
3044+ LOG.error(msg);
3045+ rsFatals.add(msg);
3046+ }
3047+
3048+ /**
3049+ * Override if you'd add messages to return to regionserver <code>hsi</code>
3050+ * or to send an exception.
3051+ * @param msgs Messages to add to
3052+ * @return Messages to return to
3053+ * @throws IOException exceptions that were injected for the region servers
3054+ */
3055+ protected HMsg [] adornRegionServerAnswer(final HServerInfo hsi,
3056+ final HMsg [] msgs) throws IOException {
3057+ return msgs;
3058+ }
3059+
3060+ public boolean isMasterRunning() {
3061+ return !isStopped();
3062+ }
3063+
3064+ @Override
3065+ public boolean balance() {
3066+ // If balance not true, don't run balancer.
3067+ if (!this.balanceSwitch) return false;
3068+ synchronized (this.balancer) {
3069+ // Only allow one balance run at a time.
3070+ if (this.assignmentManager.isRegionsInTransition()) {
3071+ LOG.debug("Not running balancer because " +
3072+ this.assignmentManager.getRegionsInTransition().size() +
3073+ " region(s) in transition: " +
3074+ org.apache.commons.lang.StringUtils.
3075+ abbreviate(this.assignmentManager.getRegionsInTransition().toString(), 256));
3076+ return false;
3077+ }
3078+ if (this.serverManager.areDeadServersInProgress()) {
3079+ LOG.debug("Not running balancer because processing dead regionserver(s): " +
3080+ this.serverManager.getDeadServers());
3081+ return false;
3082+ }
3083+ Map<HServerInfo, List<HRegionInfo>> assignments =
3084+ this.assignmentManager.getAssignments();
3085+ // Returned Map from AM does not include mention of servers w/o assignments.
3086+ for (Map.Entry<String, HServerInfo> e:
3087+ this.serverManager.getOnlineServers().entrySet()) {
3088+ HServerInfo hsi = e.getValue();
3089+ if (!assignments.containsKey(hsi)) {
3090+ assignments.put(hsi, new ArrayList<HRegionInfo>());
3091+ }
3092+ }
3093+ List<RegionPlan> plans = this.balancer.balanceCluster(assignments);
3094+ if (plans != null && !plans.isEmpty()) {
3095+ for (RegionPlan plan: plans) {
3096+ LOG.info("balance " + plan);
3097+ this.assignmentManager.balance(plan);
3098+ }
3099+ }
3100+ }
3101+ return true;
3102+ }
3103+
3104+ @Override
3105+ public boolean balanceSwitch(final boolean b) {
3106+ boolean oldValue = this.balanceSwitch;
3107+ this.balanceSwitch = b;
3108+ LOG.info("Balance=" + b);
3109+ return oldValue;
3110+ }
3111+
3112+ /**
3113+ * Switch for the background {@link CatalogJanitor} thread.
3114+ * Used for testing. The thread will continue to run. It will just be a noop
3115+ * if disabled.
3116+ * @param b If false, the catalog janitor won't do anything.
3117+ */
3118+ public void setCatalogJanitorEnabled(final boolean b) {
3119+ ((CatalogJanitor)this.catalogJanitorChore).setEnabled(b);
3120+ }
3121+
3122+ @Override
3123+ public void move(final byte[] encodedRegionName, final byte[] destServerName)
3124+ throws UnknownRegionException {
3125+ Pair<HRegionInfo, HServerInfo> p =
3126+ this.assignmentManager.getAssignment(encodedRegionName);
3127+ if (p == null)
3128+ throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
3129+ HRegionInfo hri = p.getFirst();
3130+ HServerInfo dest = null;
3131+ if (destServerName == null || destServerName.length == 0) {
3132+ LOG.info("Passed destination servername is null/empty so " +
3133+ "choosing a server at random");
3134+ this.assignmentManager.clearRegionPlan(hri);
3135+ // Unassign will reassign it elsewhere choosing random server.
3136+ this.assignmentManager.unassign(hri);
3137+ } else {
3138+ dest = this.serverManager.getServerInfo(new String(destServerName));
3139+ RegionPlan rp = new RegionPlan(p.getFirst(), p.getSecond(), dest);
3140+ this.assignmentManager.balance(rp);
3141+ }
3142+ }
3143+
3144+ public void createTable(HTableDescriptor desc, byte [][] splitKeys)
3145+ throws IOException {
3146+ createTable(desc, splitKeys, false);
3147+ }
3148+
3149+ public void createTable(HTableDescriptor desc, byte [][] splitKeys,
3150+ boolean sync)
3151+ throws IOException {
3152+ if (!isMasterRunning()) {
3153+ throw new MasterNotRunningException();
3154+ }
3155+ HRegionInfo [] newRegions = null;
3156+ if(splitKeys == null || splitKeys.length == 0) {
3157+ newRegions = new HRegionInfo [] { new HRegionInfo(desc, null, null) };
3158+ } else {
3159+ int numRegions = splitKeys.length + 1;
3160+ newRegions = new HRegionInfo[numRegions];
3161+ byte [] startKey = null;
3162+ byte [] endKey = null;
3163+ for(int i=0;i<numRegions;i++) {
3164+ endKey = (i == splitKeys.length) ? null : splitKeys[i];
3165+ newRegions[i] = new HRegionInfo(desc, startKey, endKey);
3166+ startKey = endKey;
3167+ }
3168+ }
3169+ int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
3170+ // Need META availability to create a table
3171+ try {
3172+ if(catalogTracker.waitForMeta(timeout) == null) {
3173+ throw new NotAllMetaRegionsOnlineException();
3174+ }
3175+ } catch (InterruptedException e) {
3176+ LOG.warn("Interrupted waiting for meta availability", e);
3177+ throw new IOException(e);
3178+ }
3179+ createTable(newRegions, sync);
3180+ }
3181+
3182+ private synchronized void createTable(final HRegionInfo [] newRegions,
3183+ final boolean sync)
3184+ throws IOException {
3185+ String tableName = newRegions[0].getTableDesc().getNameAsString();
3186+ if(MetaReader.tableExists(catalogTracker, tableName)) {
3187+ throw new TableExistsException(tableName);
3188+ }
3189+ // 1. Set table enabling flag up in zk.
3190+ try {
3191+ assignmentManager.getZKTable().setEnabledTable(tableName);
3192+ } catch (KeeperException e) {
3193+ throw new IOException("Unable to ensure that the table will be" +
3194+ " enabled because of a ZooKeeper issue", e);
3195+ }
3196+
3197+ List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
3198+ final int batchSize = this.conf.getInt("hbase.master.createtable.batchsize", 100);
3199+ HLog hlog = null;
3200+ for (int regionIdx = 0; regionIdx < newRegions.length; regionIdx++) {
3201+ HRegionInfo newRegion = newRegions[regionIdx];
3202+
3203+ // 2. Create HRegion
3204+ HRegion region = HRegion.createHRegion(newRegion,
3205+ fileSystemManager.getRootDir(), conf, hlog);
3206+
3207+ if (hlog == null) {
3208+ hlog = region.getLog();
3209+ }
3210+
3211+ regionInfos.add(region.getRegionInfo());
3212+ if (regionIdx % batchSize == 0) {
3213+ // 3. Insert into META
3214+ MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
3215+ regionInfos.clear();
3216+ }
3217+
3218+ // 4. Close the new region to flush to disk. Close log file too.
3219+ region.close();
3220+ }
3221+ hlog.closeAndDelete();
3222+ if (regionInfos.size() > 0) {
3223+ MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
3224+ }
3225+
3226+ // 5. Trigger immediate assignment of the regions in round-robin fashion
3227+ if (newRegions.length == 1) {
3228+ this.assignmentManager.assign(newRegions[0], true);
3229+ } else {
3230+ List<HServerInfo> servers = serverManager.getOnlineServersList();
3231+ this.assignmentManager.bulkAssignUserRegions(newRegions, servers, sync);
3232+ }
3233+
3234+ // 6. If sync, wait for assignment of regions
3235+ if (sync) {
3236+ LOG.debug("Waiting for " + newRegions.length + " region(s) to be assigned");
3237+ for (HRegionInfo regionInfo : newRegions) {
3238+ try {
3239+ this.assignmentManager.waitForAssignment(regionInfo);
3240+ } catch (InterruptedException e) {
3241+ LOG.info("Interrupted waiting for region to be assigned during " +
3242+ "create table call", e);
3243+ Thread.currentThread().interrupt();
3244+ return;
3245+ }
3246+ }
3247+ }
3248+ }
3249+
3250+ private static boolean isCatalogTable(final byte [] tableName) {
3251+ return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) ||
3252+ Bytes.equals(tableName, HConstants.META_TABLE_NAME);
3253+ }
3254+
3255+ public void deleteTable(final byte [] tableName) throws IOException {
3256+ this.executorService.submit(new DeleteTableHandler(tableName, this, this));
3257+ }
3258+
3259+ public void addColumn(byte [] tableName, HColumnDescriptor column)
3260+ throws IOException {
3261+ new TableAddFamilyHandler(tableName, column, this, this).process();
3262+ }
3263+
3264+ public void modifyColumn(byte [] tableName, HColumnDescriptor descriptor)
3265+ throws IOException {
3266+ new TableModifyFamilyHandler(tableName, descriptor, this, this).process();
3267+ }
3268+
3269+ public void deleteColumn(final byte [] tableName, final byte [] c)
3270+ throws IOException {
3271+ new TableDeleteFamilyHandler(tableName, c, this, this).process();
3272+ }
3273+
3274+ public void enableTable(final byte [] tableName) throws IOException {
3275+ this.executorService.submit(new EnableTableHandler(this, tableName,
3276+ catalogTracker, assignmentManager));
3277+ }
3278+
3279+ public void disableTable(final byte [] tableName) throws IOException {
3280+ this.executorService.submit(new DisableTableHandler(this, tableName,
3281+ catalogTracker, assignmentManager));
3282+ }
3283+
3284+ /**
3285+ * Return the region and current deployment for the region containing
3286+ * the given row. If the region cannot be found, returns null. If it
3287+ * is found, but not currently deployed, the second element of the pair
3288+ * may be null.
3289+ */
3290+ Pair<HRegionInfo,HServerAddress> getTableRegionForRow(
3291+ final byte [] tableName, final byte [] rowKey)
3292+ throws IOException {
3293+ final AtomicReference<Pair<HRegionInfo, HServerAddress>> result =
3294+ new AtomicReference<Pair<HRegionInfo, HServerAddress>>(null);
3295+
3296+ MetaScannerVisitor visitor =
3297+ new MetaScannerVisitor() {
3298+ @Override
3299+ public boolean processRow(Result data) throws IOException {
3300+ if (data == null || data.size() <= 0) {
3301+ return true;
3302+ }
3303+ Pair<HRegionInfo, HServerAddress> pair =
3304+ MetaReader.metaRowToRegionPair(data);
3305+ if (pair == null) {
3306+ return false;
3307+ }
3308+ if (!Bytes.equals(pair.getFirst().getTableDesc().getName(),
3309+ tableName)) {
3310+ return false;
3311+ }
3312+ result.set(pair);
3313+ return true;
3314+ }
3315+ };
3316+
3317+ MetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
3318+ return result.get();
3319+ }
3320+
3321+ @Override
3322+ public void modifyTable(final byte[] tableName, HTableDescriptor htd)
3323+ throws IOException {
3324+ this.executorService.submit(new ModifyTableHandler(tableName, htd, this, this));
3325+ }
3326+
3327+ @Override
3328+ public void checkTableModifiable(final byte [] tableName)
3329+ throws IOException {
3330+ String tableNameStr = Bytes.toString(tableName);
3331+ if (isCatalogTable(tableName)) {
3332+ throw new IOException("Can't modify catalog tables");
3333+ }
3334+ if (!MetaReader.tableExists(getCatalogTracker(), tableNameStr)) {
3335+ throw new TableNotFoundException(tableNameStr);
3336+ }
3337+ if (!getAssignmentManager().getZKTable().
3338+ isDisabledTable(Bytes.toString(tableName))) {
3339+ throw new TableNotDisabledException(tableName);
3340+ }
3341+ }
3342+
3343+ public void clearFromTransition(HRegionInfo hri) {
3344+ if (this.assignmentManager.isRegionInTransition(hri) != null) {
3345+ this.assignmentManager.clearRegionFromTransition(hri);
3346+ }
3347+ }
3348+ /**
3349+ * @return cluster status
3350+ */
3351+ public ClusterStatus getClusterStatus() {
3352+ ClusterStatus status = new ClusterStatus();
3353+ status.setHBaseVersion(VersionInfo.getVersion());
3354+ status.setServerInfo(serverManager.getOnlineServers().values());
3355+ status.setDeadServers(serverManager.getDeadServers());
3356+ status.setRegionsInTransition(assignmentManager.getRegionsInTransition());
3357+ return status;
3358+ }
3359+
3360+ @Override
3361+ public void abort(final String msg, final Throwable t) {
3362+ if (t != null) LOG.fatal(msg, t);
3363+ else LOG.fatal(msg);
3364+ this.abort = true;
3365+ stop("Aborting");
3366+ }
3367+
3368+ @Override
3369+ public ZooKeeperWatcher getZooKeeper() {
3370+ return zooKeeper;
3371+ }
3372+
3373+ @Override
3374+ public String getServerName() {
3375+ return address.toString();
3376+ }
3377+
3378+ @Override
3379+ public CatalogTracker getCatalogTracker() {
3380+ return catalogTracker;
3381+ }
3382+
3383+ @Override
3384+ public AssignmentManager getAssignmentManager() {
3385+ return this.assignmentManager;
3386+ }
3387+
3388+ public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
3389+ return rsFatals;
3390+ }
3391+
3392+ @Override
3393+ public void shutdown() {
3394+ this.serverManager.shutdownCluster();
3395+ try {
3396+ this.clusterStatusTracker.setClusterDown();
3397+ } catch (KeeperException e) {
3398+ LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
3399+ }
3400+ }
3401+
3402+ @Override
3403+ public void stopMaster() {
3404+ stop("Stopped by " + Thread.currentThread().getName());
3405+ }
3406+
3407+ @Override
3408+ public void stop(final String why) {
3409+ LOG.info(why);
3410+ this.stopped = true;
3411+ // If we are a backup master, we need to interrupt wait
3412+ synchronized (this.activeMasterManager.clusterHasActiveMaster) {
3413+ this.activeMasterManager.clusterHasActiveMaster.notifyAll();
3414+ }
3415+ }
3416+
3417+ @Override
3418+ public boolean isStopped() {
3419+ return this.stopped;
3420+ }
3421+
3422+ /**
3423+ * Report whether this master is currently the active master or not.
3424+ * If not active master, we are parked on ZK waiting to become active.
3425+ *
3426+ * This method is used for testing.
3427+ *
3428+ * @return true if active master, false if not.
3429+ */
3430+ public boolean isActiveMaster() {
3431+ return isActiveMaster;
3432+ }
3433+
3434+ /**
3435+ * Report whether this master has completed with its initialization and is
3436+ * ready. If ready, the master is also the active master. A standby master
3437+ * is never ready.
3438+ *
3439+ * This method is used for testing.
3440+ *
3441+ * @return true if master is ready to go, false if not.
3442+ */
3443+ public boolean isInitialized() {
3444+ return initialized;
3445+ }
3446+
3447+ @Override
3448+ public void assign(final byte [] regionName, final boolean force)
3449+ throws IOException {
3450+ Pair<HRegionInfo, HServerAddress> pair =
3451+ MetaReader.getRegion(this.catalogTracker, regionName);
3452+ if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
3453+ assignRegion(pair.getFirst());
3454+ }
3455+
3456+ public void assignRegion(HRegionInfo hri) {
3457+ assignmentManager.assign(hri, true);
3458+ }
3459+
3460+ @Override
3461+ public void unassign(final byte [] regionName, final boolean force)
3462+ throws IOException {
3463+ Pair<HRegionInfo, HServerAddress> pair =
3464+ MetaReader.getRegion(this.catalogTracker, regionName);
3465+ if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
3466+ HRegionInfo hri = pair.getFirst();
3467+ if (force) {
3468+ this.assignmentManager.clearRegionFromTransition(hri);
3469+ assignRegion(hri);
3470+ } else {
3471+ this.assignmentManager.unassign(hri, force);
3472+ }
3473+ }
3474+
/**
 * Utility for constructing an instance of the passed HMaster class.
 * The class must expose a public constructor taking a single
 * {@link Configuration}; it is located and invoked reflectively.
 * @param masterClass concrete HMaster subclass to instantiate
 * @param conf configuration handed to the constructor
 * @return HMaster instance.
 * @throws RuntimeException wrapping any reflective or constructor failure
 */
public static HMaster constructMaster(Class<? extends HMaster> masterClass,
    final Configuration conf) {
  try {
    Constructor<? extends HMaster> c =
      masterClass.getConstructor(Configuration.class);
    return c.newInstance(conf);
  } catch (InvocationTargetException ite) {
    // The constructor body itself threw: unwrap to the most specific cause
    // so the real failure is what gets reported.
    Throwable target = ite.getTargetException() != null?
      ite.getTargetException(): ite;
    if (target.getCause() != null) target = target.getCause();
    throw new RuntimeException("Failed construction of Master: " +
      masterClass.toString(), target);
  } catch (Exception e) {
    // Reflection-level failures: missing constructor, access denied, etc.
    throw new RuntimeException("Failed construction of Master: " +
      masterClass.toString() + ((e.getCause() != null)?
        e.getCause().getMessage(): ""), e);
  }
}
3499+
3500+
/**
 * Command-line entry point: delegates argument parsing and master startup
 * to HMasterCommandLine.
 * @see org.apache.hadoop.hbase.master.HMasterCommandLine
 */
public static void main(String [] args) throws Exception {
  new HMasterCommandLine(HMaster.class).doMain(args);
}
3507+}
3508
3509=== added file 'AppDB/hbase/patch/HRegionServer.java'
3510--- AppDB/hbase/patch/HRegionServer.java 1970-01-01 00:00:00 +0000
3511+++ AppDB/hbase/patch/HRegionServer.java 2012-02-16 06:02:21 +0000
3512@@ -0,0 +1,2828 @@
3513+/**
3514+ * Copyright 2010 The Apache Software Foundation
3515+ *
3516+ * Licensed to the Apache Software Foundation (ASF) under one
3517+ * or more contributor license agreements. See the NOTICE file
3518+ * distributed with this work for additional information
3519+ * regarding copyright ownership. The ASF licenses this file
3520+ * to you under the Apache License, Version 2.0 (the
3521+ * "License"); you may not use this file except in compliance
3522+ * with the License. You may obtain a copy of the License at
3523+ *
3524+ * http://www.apache.org/licenses/LICENSE-2.0
3525+ *
3526+ * Unless required by applicable law or agreed to in writing, software
3527+ * distributed under the License is distributed on an "AS IS" BASIS,
3528+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3529+ * See the License for the specific language governing permissions and
3530+ * limitations under the License.
3531+ */
3532+package org.apache.hadoop.hbase.regionserver;
3533+
3534+import java.io.IOException;
3535+import java.lang.Thread.UncaughtExceptionHandler;
3536+import java.lang.annotation.Retention;
3537+import java.lang.annotation.RetentionPolicy;
3538+import java.lang.management.ManagementFactory;
3539+import java.lang.management.MemoryUsage;
3540+import java.lang.reflect.Constructor;
3541+import java.lang.reflect.Method;
3542+import java.net.BindException;
3543+import java.net.InetSocketAddress;
3544+import java.util.ArrayList;
3545+import java.util.Collection;
3546+import java.util.Collections;
3547+import java.util.Comparator;
3548+import java.util.HashMap;
3549+import java.util.HashSet;
3550+import java.util.Iterator;
3551+import java.util.LinkedList;
3552+import java.util.List;
3553+import java.util.Map;
3554+import java.util.Random;
3555+import java.util.Set;
3556+import java.util.SortedMap;
3557+import java.util.TreeMap;
3558+import java.util.concurrent.ConcurrentHashMap;
3559+import java.util.concurrent.ConcurrentSkipListSet;
3560+import java.util.concurrent.LinkedBlockingQueue;
3561+import java.util.concurrent.TimeUnit;
3562+import java.util.concurrent.atomic.AtomicBoolean;
3563+import java.util.concurrent.atomic.AtomicInteger;
3564+import java.util.concurrent.locks.ReentrantReadWriteLock;
3565+
3566+import org.apache.commons.logging.Log;
3567+import org.apache.commons.logging.LogFactory;
3568+import org.apache.hadoop.conf.Configuration;
3569+import org.apache.hadoop.fs.FileSystem;
3570+import org.apache.hadoop.fs.Path;
3571+import org.apache.hadoop.hbase.Chore;
3572+import org.apache.hadoop.hbase.ClockOutOfSyncException;
3573+import org.apache.hadoop.hbase.DoNotRetryIOException;
3574+import org.apache.hadoop.hbase.HBaseConfiguration;
3575+import org.apache.hadoop.hbase.HConstants;
3576+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
3577+import org.apache.hadoop.hbase.HMsg;
3578+import org.apache.hadoop.hbase.HRegionInfo;
3579+import org.apache.hadoop.hbase.HServerAddress;
3580+import org.apache.hadoop.hbase.HServerInfo;
3581+import org.apache.hadoop.hbase.HServerLoad;
3582+import org.apache.hadoop.hbase.KeyValue;
3583+import org.apache.hadoop.hbase.MasterAddressTracker;
3584+import org.apache.hadoop.hbase.NotServingRegionException;
3585+import org.apache.hadoop.hbase.RemoteExceptionHandler;
3586+import org.apache.hadoop.hbase.Server;
3587+import org.apache.hadoop.hbase.Stoppable;
3588+import org.apache.hadoop.hbase.UnknownRowLockException;
3589+import org.apache.hadoop.hbase.UnknownScannerException;
3590+import org.apache.hadoop.hbase.YouAreDeadException;
3591+import org.apache.hadoop.hbase.catalog.CatalogTracker;
3592+import org.apache.hadoop.hbase.catalog.MetaEditor;
3593+import org.apache.hadoop.hbase.catalog.RootLocationEditor;
3594+import org.apache.hadoop.hbase.client.Action;
3595+import org.apache.hadoop.hbase.client.Delete;
3596+import org.apache.hadoop.hbase.client.Get;
3597+import org.apache.hadoop.hbase.client.Increment;
3598+import org.apache.hadoop.hbase.client.MultiAction;
3599+import org.apache.hadoop.hbase.client.MultiPut;
3600+import org.apache.hadoop.hbase.client.MultiPutResponse;
3601+import org.apache.hadoop.hbase.client.MultiResponse;
3602+import org.apache.hadoop.hbase.client.Put;
3603+import org.apache.hadoop.hbase.client.Result;
3604+import org.apache.hadoop.hbase.client.Row;
3605+import org.apache.hadoop.hbase.client.Scan;
3606+import org.apache.hadoop.hbase.executor.ExecutorService;
3607+import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
3608+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
3609+import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
3610+import org.apache.hadoop.hbase.ipc.HBaseRPC;
3611+import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
3612+import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
3613+import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics;
3614+import org.apache.hadoop.hbase.ipc.HBaseServer;
3615+import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
3616+import org.apache.hadoop.hbase.ipc.HRegionInterface;
3617+import org.apache.hadoop.hbase.ipc.ServerNotRunningException;
3618+import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
3619+import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
3620+import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
3621+import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler;
3622+import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
3623+import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
3624+import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler;
3625+import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
3626+import org.apache.hadoop.hbase.regionserver.wal.HLog;
3627+import org.apache.hadoop.hbase.regionserver.wal.WALObserver;
3628+import org.apache.hadoop.hbase.replication.regionserver.Replication;
3629+import org.apache.hadoop.hbase.security.User;
3630+import org.apache.hadoop.hbase.util.Bytes;
3631+import org.apache.hadoop.hbase.util.CompressionTest;
3632+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
3633+import org.apache.hadoop.hbase.util.FSUtils;
3634+import org.apache.hadoop.hbase.util.InfoServer;
3635+import org.apache.hadoop.hbase.util.Pair;
3636+import org.apache.hadoop.hbase.util.Sleeper;
3637+import org.apache.hadoop.hbase.util.Strings;
3638+import org.apache.hadoop.hbase.util.Threads;
3639+import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
3640+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
3641+import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
3642+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
3643+import org.apache.hadoop.io.MapWritable;
3644+import org.apache.hadoop.io.Writable;
3645+import org.apache.hadoop.ipc.RemoteException;
3646+import org.apache.hadoop.net.DNS;
3647+import org.apache.hadoop.util.StringUtils;
3648+import org.apache.zookeeper.KeeperException;
3649+
3650+import com.google.common.base.Function;
3651+import com.google.common.collect.Lists;
3652+
3653+/**
3654+ * HRegionServer makes a set of HRegions available to clients. It checks in with
3655+ * the HMaster. There are many HRegionServers in a single HBase deployment.
3656+ */
3657+public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
3658+ Runnable, RegionServerServices, Server {
/** Shared logger for this class. */
public static final Log LOG = LogFactory.getLog(HRegionServer.class);

// Set when a report to the master comes back with a message asking us to
// shutdown. Also set by call to stop when debugging or running unit tests
// of HRegionServer in isolation.
protected volatile boolean stopped = false;

// A state before we go into stopped state. At this stage we're closing user
// space regions.
private boolean stopping = false;

// Go down hard. Used if file system becomes unavailable and also in
// debugging and unit tests.
protected volatile boolean abortRequested;

// When set, skip the orderly region-close path on the way out.
private volatile boolean killed = false;

// If false, the file system has become unavailable
protected volatile boolean fsOk;

// This server's identity as reported to (and possibly amended by) the master.
protected HServerInfo serverInfo;
protected final Configuration conf;

// Flips to true once the -ROOT- region location is known.
protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
private FileSystem fs;
private Path rootDir;
private final Random rand = new Random();

/**
 * Map of regions currently being served by this region server. Key is the
 * encoded region name.  All access should be synchronized.
 */
protected final Map<String, HRegion> onlineRegions =
  new ConcurrentHashMap<String, HRegion>();

protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
// Messages queued for delivery to the master on the next report.
private final LinkedBlockingQueue<HMsg> outboundMsgs = new LinkedBlockingQueue<HMsg>();

// Retry count used when talking to the master (hbase.client.retries.number).
final int numRetries;
// Wake interval for housekeeping threads, in ms.
protected final int threadWakeFrequency;
// Interval between reports to the master, in ms.
private final int msgInterval;

// How many most-loaded regions to include in each master report.
protected final int numRegionsToReport;

private final long maxScannerResultSize;

// Remote HMaster
private HMasterRegionInterface hbaseMaster;

// Server to handle client requests. Default access so can be accessed by
// unit tests.
HBaseServer server;

// Leases
private Leases leases;

// Request counter
private volatile AtomicInteger requestCount = new AtomicInteger();

// Info server. Default access so can be used by unit tests. REGIONSERVER
// is name of the webapp and the attribute name used stuffing this instance
// into web context.
InfoServer infoServer;

/** region server process name */
public static final String REGIONSERVER = "regionserver";

/*
 * Space is reserved in HRS constructor and then released when aborting to
 * recover from an OOME. See HBASE-706. TODO: Make this percentage of the heap
 * or a minimum.
 */
private final LinkedList<byte[]> reservedSpace = new LinkedList<byte[]>();

private RegionServerMetrics metrics;

// Compactions
CompactSplitThread compactSplitThread;

// Cache flushing
MemStoreFlusher cacheFlusher;

/*
 * Check for major compactions.
 */
Chore majorCompactionChecker;

// HLog and HLog roller. log is protected rather than private to avoid
// eclipse warning when accessed by inner classes
protected volatile HLog hlog;
LogRoller hlogRoller;

// flag set after we're done setting up server threads (used for testing)
protected volatile boolean isOnline;

// Open scanners keyed by stringified scanner id.
final Map<String, InternalScanner> scanners = new ConcurrentHashMap<String, InternalScanner>();

// zookeeper connection and watcher
private ZooKeeperWatcher zooKeeper;

// master address manager and watcher
private MasterAddressTracker masterAddressManager;

// catalog tracker
private CatalogTracker catalogTracker;

// Cluster Status Tracker
private ClusterStatusTracker clusterStatusTracker;

// Log Splitting Worker
private SplitLogWorker splitLogWorker;

// A sleeper that sleeps for msgInterval.
private final Sleeper sleeper;

private final int rpcTimeout;

// The main region server thread.
@SuppressWarnings("unused")
private Thread regionServerThread;

// Instance of the hbase executor service.
private ExecutorService service;

// Replication services. If no replication, this handler will be null.
private Replication replicationHandler;

// Encoded names of regions currently opening/closing on this server.
private final Set<byte[]> regionsInTransitionInRS =
  new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
3788+
/**
 * Starts a HRegionServer at the default location.
 * <p>
 * Validates any configured compression codecs, reads timing/retry settings,
 * brings up the RPC server on the configured interface and port, and logs in
 * the server principal when secure Hadoop is in use.  No regions are opened
 * here; that happens after the master responds in reportForDuty.
 *
 * @param conf cluster configuration this server runs with
 * @throws IOException if a configured codec is unsupported or the RPC server
 *   cannot be created
 * @throws InterruptedException
 */
public HRegionServer(Configuration conf) throws IOException, InterruptedException {
  this.fsOk = true;
  this.conf = conf;
  this.isOnline = false;

  // check to see if the codec list is available:
  String [] codecs = conf.getStrings("hbase.regionserver.codecs",
      (String[])null);
  if (codecs != null) {
    for (String codec : codecs) {
      if (!CompressionTest.testCompression(codec)) {
        throw new IOException("Compression codec " + codec +
          " not supported, aborting RS construction");
      }
    }
  }

  // Config'ed params
  this.numRetries = conf.getInt("hbase.client.retries.number", 10);
  this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY,
    10 * 1000);
  this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);

  sleeper = new Sleeper(this.msgInterval, this);

  this.maxScannerResultSize = conf.getLong(
    HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
    HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);

  this.numRegionsToReport = conf.getInt(
    "hbase.regionserver.numregionstoreport", 10);

  this.rpcTimeout = conf.getInt(
    HConstants.HBASE_RPC_TIMEOUT_KEY,
    HConstants.DEFAULT_HBASE_RPC_TIMEOUT);

  this.abortRequested = false;
  this.stopped = false;

  // Server to handle client requests
  // NOTE(review): getDefaultIP yields the interface's IP rather than its
  // hostname — this looks like the AppScale "default IP" patch (upstream
  // HBase resolves a hostname here); confirm against upstream before editing.
  String machineName = DNS.getDefaultIP(
    conf.get("hbase.regionserver.dns.interface","default"));

  String addressStr = machineName + ":" +
    conf.get(HConstants.REGIONSERVER_PORT,
      Integer.toString(HConstants.DEFAULT_REGIONSERVER_PORT));
  HServerAddress address = new HServerAddress(addressStr);
  this.server = HBaseRPC.getServer(this,
      new Class<?>[]{HRegionInterface.class, HBaseRPCErrorHandler.class,
        OnlineRegions.class},
      address.getBindAddress(),
      address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
      conf.getInt("hbase.regionserver.metahandler.count", 10),
      false, conf, QOS_THRESHOLD);
  this.server.setErrorHandler(this);
  this.server.setQosFunction(new QosFunction());

  // HServerInfo can be amended by master. See below in reportForDuty.
  // Use the port the RPC server actually bound (may differ if 0 was asked).
  this.serverInfo = new HServerInfo(new HServerAddress(new InetSocketAddress(
    address.getBindAddress(), this.server.getListenerAddress().getPort())),
    System.currentTimeMillis(), this.conf.getInt(
      "hbase.regionserver.info.port", 60030), machineName);
  if (this.serverInfo.getServerAddress() == null) {
    throw new NullPointerException("Server address cannot be null; "
      + "hbase-958 debugging");
  }

  // login the server principal (if using secure Hadoop)
  User.login(conf, "hbase.regionserver.keytab.file",
    "hbase.regionserver.kerberos.principal", serverInfo.getHostname());
}
3867+
// Request-priority levels used by QosFunction when routing incoming RPCs.
private static final int NORMAL_QOS = 0;
private static final int QOS_THRESHOLD = 10; // the line between low and high qos
private static final int HIGH_QOS = 100;

// Marks RPC handler methods that should run at a non-default priority;
// read reflectively at runtime by QosFunction's constructor.
@Retention(RetentionPolicy.RUNTIME)
private @interface QosPriority {
  int priority() default 0;
}
3876+
/**
 * Maps an incoming RPC invocation to a priority integer.  Methods annotated
 * with @QosPriority get their declared priority; otherwise calls touching a
 * meta region (scans, byte[]-region-name calls, MultiActions) get HIGH_QOS
 * and everything else NORMAL_QOS.
 */
class QosFunction implements Function<Writable,Integer> {
  // Method name -> priority, harvested once from @QosPriority annotations.
  private final Map<String, Integer> annotatedQos;

  public QosFunction() {
    Map<String, Integer> qosMap = new HashMap<String, Integer>();
    for (Method m : HRegionServer.class.getMethods()) {
      QosPriority p = m.getAnnotation(QosPriority.class);
      if (p != null) {
        qosMap.put(m.getName(), p.priority());
      }
    }

    annotatedQos = qosMap;
  }

  // True if the named region is online here and is a meta region.
  public boolean isMetaRegion(byte[] regionName) {
    HRegion region;
    try {
      region = getRegion(regionName);
    } catch (NotServingRegionException ignored) {
      return false;
    }
    return region.getRegionInfo().isMetaRegion();
  }

  @Override
  public Integer apply(Writable from) {
    if (!(from instanceof HBaseRPC.Invocation)) return NORMAL_QOS;

    HBaseRPC.Invocation inv = (HBaseRPC.Invocation) from;
    String methodName = inv.getMethodName();

    // Explicit annotation wins over all heuristics below.
    Integer priorityByAnnotation = annotatedQos.get(methodName);
    if (priorityByAnnotation != null) {
      return priorityByAnnotation;
    }

    // scanner methods...
    if (methodName.equals("next") || methodName.equals("close")) {
      // translate scanner id -> open scanner -> owning region
      Long scannerId;
      try {
        scannerId = (Long) inv.getParameters()[0];
      } catch (ClassCastException ignored) {
        // LOG.debug("Low priority: " + from);
        return NORMAL_QOS; // doh.
      }
      String scannerIdString = Long.toString(scannerId);
      InternalScanner scanner = scanners.get(scannerIdString);
      if (scanner instanceof HRegion.RegionScanner) {
        HRegion.RegionScanner rs = (HRegion.RegionScanner) scanner;
        HRegionInfo regionName = rs.getRegionName();
        if (regionName.isMetaRegion()) {
          // LOG.debug("High priority scanner request: " + scannerId);
          return HIGH_QOS;
        }
      }
    } else if (inv.getParameterClasses().length == 0) {
      // Just let it through. This is getOnlineRegions, etc.
    } else if (inv.getParameterClasses()[0] == byte[].class) {
      // first arg is byte array, so assume this is a regionname:
      if (isMetaRegion((byte[]) inv.getParameters()[0])) {
        // LOG.debug("High priority with method: " + methodName +
        // " and region: "
        // + Bytes.toString((byte[]) inv.getParameters()[0]));
        return HIGH_QOS;
      }
    } else if (inv.getParameterClasses()[0] == MultiAction.class) {
      MultiAction ma = (MultiAction) inv.getParameters()[0];
      Set<byte[]> regions = ma.getRegions();
      // If any single action in the batch touches a meta region, the whole
      // batch is promoted to high priority.  Known hack: a client can get
      // its multi tagged HIGH_QOS by including a Get(.META.) when this
      // regionserver also hosts META/-ROOT-.
      for (byte[] region : regions) {
        if (isMetaRegion(region)) {
          // LOG.debug("High priority multi with region: " +
          // Bytes.toString(region));
          return HIGH_QOS; // short circuit for the win.
        }
      }
    }
    // LOG.debug("Low priority: " + from.toString());
    return NORMAL_QOS;
  }
}
3966+
3967+ /**
3968+ * Creates all of the state that needs to be reconstructed in case we are
3969+ * doing a restart. This is shared between the constructor and restart(). Both
3970+ * call it.
3971+ *
3972+ * @throws IOException
3973+ * @throws InterruptedException
3974+ */
3975+ private void initialize() {
3976+ try {
3977+ initializeZooKeeper();
3978+ initializeThreads();
3979+ int nbBlocks = conf.getInt("hbase.regionserver.nbreservationblocks", 4);
3980+ for (int i = 0; i < nbBlocks; i++) {
3981+ reservedSpace.add(new byte[HConstants.DEFAULT_SIZE_RESERVATION_BLOCK]);
3982+ }
3983+ } catch (Throwable t) {
3984+ // Call stop if error or process will stick around for ever since server
3985+ // puts up non-daemon threads.
3986+ this.server.stop();
3987+ abort("Initialization of RS failed. Hence aborting RS.", t);
3988+ }
3989+ }
3990+
/**
 * Bring up connection to zk ensemble and then wait until a master for this
 * cluster and then after that, wait until cluster 'up' flag has been set.
 * This is the order in which master does things.
 * Finally put up a catalog tracker.
 * @throws IOException
 * @throws InterruptedException
 */
private void initializeZooKeeper() throws IOException, InterruptedException {
  // Open connection to zookeeper and set primary watcher
  zooKeeper = new ZooKeeperWatcher(conf, REGIONSERVER + ":" +
    serverInfo.getServerAddress().getPort(), this);

  // Create the master address manager, register with zk, and start it. Then
  // block until a master is available. No point in starting up if no master
  // running.
  this.masterAddressManager = new MasterAddressTracker(this.zooKeeper, this);
  this.masterAddressManager.start();
  blockAndCheckIfStopped(this.masterAddressManager);

  // Wait on cluster being up. Master will set this flag up in zookeeper
  // when ready.
  this.clusterStatusTracker = new ClusterStatusTracker(this.zooKeeper, this);
  this.clusterStatusTracker.start();
  blockAndCheckIfStopped(this.clusterStatusTracker);

  // Create the catalog tracker and start it; timeout defaults to "forever".
  this.catalogTracker = new CatalogTracker(this.zooKeeper, this.conf,
    this, this.conf.getInt("hbase.regionserver.catalog.timeout", Integer.MAX_VALUE));
  catalogTracker.start();
}
4022+
4023+ /**
4024+ * Utilty method to wait indefinitely on a znode availability while checking
4025+ * if the region server is shut down
4026+ * @param tracker znode tracker to use
4027+ * @throws IOException any IO exception, plus if the RS is stopped
4028+ * @throws InterruptedException
4029+ */
4030+ private void blockAndCheckIfStopped(ZooKeeperNodeTracker tracker)
4031+ throws IOException, InterruptedException {
4032+ while (tracker.blockUntilAvailable(this.msgInterval) == null) {
4033+ if (this.stopped) {
4034+ throw new IOException("Received the shutdown message while waiting.");
4035+ }
4036+ }
4037+ }
4038+
4039+ /**
4040+ * @return False if cluster shutdown in progress
4041+ */
4042+ private boolean isClusterUp() {
4043+ return this.clusterStatusTracker.isClusterUp();
4044+ }
4045+
/**
 * Construct (but do not yet start) the worker threads this server runs:
 * memstore flusher, compaction/split thread, major-compaction checker, and
 * the lease monitor.
 * @throws IOException
 */
private void initializeThreads() throws IOException {

  // Cache flushing thread.
  this.cacheFlusher = new MemStoreFlusher(conf, this);

  // Compaction thread
  this.compactSplitThread = new CompactSplitThread(this);

  // Background thread to check for major compactions; needed if region
  // has not gotten updates in a while. Make it run at a lesser frequency.
  int multiplier = this.conf.getInt(HConstants.THREAD_WAKE_FREQUENCY
      + ".multiplier", 1000);
  this.majorCompactionChecker = new MajorCompactionChecker(this,
    this.threadWakeFrequency * multiplier, this);

  // Lease monitor for scanner/row-lock leases.
  this.leases = new Leases((int) conf.getLong(
      HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
      HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD),
    this.threadWakeFrequency);
}
4066+
/**
 * The HRegionServer sticks in this loop until closed. It repeatedly checks
 * in with the HMaster, sending heartbeats &amp; reports, and receiving
 * HRegion load/unload instructions.  On exit it tears down leases, the RPC
 * server, info server, block cache, worker threads, regions, WAL, and the
 * zookeeper connection, in that order.
 */
public void run() {

  try {
    // Initialize threads and wait for a master
    initialize();
  } catch (Throwable e) {
    abort("Fatal exception during initialization", e);
  }

  this.regionServerThread = Thread.currentThread();
  try {
    // Keep announcing ourselves until the master accepts our report.
    while (!this.stopped) {
      if (tryReportForDuty()) break;
    }
    long lastMsg = 0;
    List<HMsg> outboundMessages = new ArrayList<HMsg>();
    // The main run loop.
    for (int tries = 0; !this.stopped && isHealthy();) {
      if (!isClusterUp()) {
        // Cluster-wide shutdown: close user regions, then exit once empty.
        if (isOnlineRegionsEmpty()) {
          stop("Exiting; cluster shutdown set and not carrying any regions");
        } else if (!this.stopping) {
          this.stopping = true;
          closeUserRegions(this.abortRequested);
        } else if (this.stopping && LOG.isDebugEnabled()) {
          LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
        }
      }
      long now = System.currentTimeMillis();
      // Drop into the send loop if msgInterval has elapsed or if something
      // to send. If we fail talking to the master, then we'll sleep below
      // on poll of the outboundMsgs blockingqueue.
      if ((now - lastMsg) >= msgInterval || !outboundMessages.isEmpty()) {
        try {
          doMetrics();
          tryRegionServerReport(outboundMessages);
          lastMsg = System.currentTimeMillis();
          // Reset tries count if we had a successful transaction.
          tries = 0;
          if (this.stopped) continue;
        } catch (Exception e) { // FindBugs REC_CATCH_EXCEPTION
          // Two special exceptions could be printed out here,
          // PleaseHoldException and YouAreDeadException
          if (e instanceof IOException) {
            e = RemoteExceptionHandler.checkIOException((IOException) e);
          }
          if (e instanceof YouAreDeadException) {
            // This will be caught and handled as a fatal error below
            throw e;
          }
          tries++;
          if (tries > 0 && (tries % this.numRetries) == 0) {
            // Check filesystem every so often.
            checkFileSystem();
          }
          if (this.stopped) {
            continue;
          }
          LOG.warn("Attempt=" + tries, e);
          // No point retrying immediately; this is probably connection to
          // master issue. Doing below will cause us to sleep.
          lastMsg = System.currentTimeMillis();
        }
      }
      now = System.currentTimeMillis();
      // Sleep out the rest of the report interval on the outbound queue so
      // a freshly queued message wakes us immediately.
      HMsg msg = this.outboundMsgs.poll((msgInterval - (now - lastMsg)), TimeUnit.MILLISECONDS);
      if (msg != null) outboundMessages.add(msg);
    } // for
  } catch (Throwable t) {
    if (!checkOOME(t)) {
      abort("Unhandled exception: " + t.getMessage(), t);
    }
  }
  // --- Shutdown sequence from here down. ---
  this.leases.closeAfterLeasesExpire();
  this.server.stop();
  if (this.splitLogWorker != null) {
    splitLogWorker.stop();
  }
  if (this.infoServer != null) {
    LOG.info("Stopping infoServer");
    try {
      this.infoServer.stop();
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
  // Send cache a shutdown.
  LruBlockCache c = (LruBlockCache) StoreFile.getBlockCache(this.conf);
  if (c != null) {
    c.shutdown();
  }

  // Send interrupts to wake up threads if sleeping so they notice shutdown.
  // TODO: Should we check they are alive? If OOME could have exited already
  if (this.cacheFlusher != null) this.cacheFlusher.interruptIfNecessary();
  if (this.compactSplitThread != null) this.compactSplitThread.interruptIfNecessary();
  if (this.hlogRoller != null) this.hlogRoller.interruptIfNecessary();
  if (this.majorCompactionChecker != null) this.majorCompactionChecker.interrupt();

  if (this.killed) {
    // Just skip out w/o closing regions.
  } else if (abortRequested) {
    if (this.fsOk) {
      closeAllRegions(abortRequested); // Don't leave any open file handles
      closeWAL(false);
    }
    LOG.info("aborting server at: " + this.serverInfo.getServerName());
  } else {
    closeAllRegions(abortRequested);
    closeWAL(true);
    closeAllScanners();
    LOG.info("stopping server at: " + this.serverInfo.getServerName());
  }
  // Interrupt catalog tracker here in case any regions being opened out in
  // handlers are stuck waiting on meta or root.
  if (this.catalogTracker != null) this.catalogTracker.stop();
  if (this.fsOk)
    waitOnAllRegionsToClose(abortRequested);

  // Make sure the proxy is down.
  if (this.hbaseMaster != null) {
    HBaseRPC.stopProxy(this.hbaseMaster);
    this.hbaseMaster = null;
  }
  this.leases.close();
  this.zooKeeper.close();
  if (!killed) {
    join();
  }
  LOG.info(Thread.currentThread().getName() + " exiting");
}
4203+
4204+ String getOnlineRegionsAsPrintableString() {
4205+ StringBuilder sb = new StringBuilder();
4206+ for (HRegion r: this.onlineRegions.values()) {
4207+ if (sb.length() > 0) sb.append(", ");
4208+ sb.append(r.getRegionInfo().getEncodedName());
4209+ }
4210+ return sb.toString();
4211+ }
4212+
/**
 * Wait on regions close.  Polls once a second until onlineRegions drains,
 * re-issuing closes for any region not already in transition (guards
 * against an open racing in after the close sweep started).
 * @param abort passed through to closeRegion
 */
private void waitOnAllRegionsToClose(final boolean abort) {
  // Wait till all regions are closed before going out.
  int lastCount = -1;
  while (!isOnlineRegionsEmpty()) {
    int count = getNumberOfOnlineRegions();
    // Only print a message if the count of regions has changed.
    if (count != lastCount) {
      lastCount = count;
      LOG.info("Waiting on " + count + " regions to close");
      // Only print out regions still closing if a small number else will
      // swamp the log.
      if (count < 10 && LOG.isDebugEnabled()) {
        LOG.debug(this.onlineRegions);
      }
    }
    // Ensure all user regions have been sent to close. Use this to
    // protect against the case where an open comes in after we start the
    // iterator of onlineRegions to close all user regions.
    for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
      HRegionInfo hri = e.getValue().getRegionInfo();
      if (!this.regionsInTransitionInRS.contains(hri.getEncodedNameAsBytes())) {
        // Don't update zk with this close transition; pass false.
        closeRegion(hri, abort, false);
      }
    }
    Threads.sleep(1000);
  }
}
4244+
/**
 * Send one load report (plus any queued outbound messages) to the master,
 * retrying until it succeeds or this server is stopped, then act on the
 * master's reply messages (only STOP_REGIONSERVER is handled here).
 * @param outboundMessages messages to deliver; cleared before returning
 * @return the (now empty) outboundMessages list
 * @throws IOException rethrows YouAreDeadException so run() treats it fatal
 */
List<HMsg> tryRegionServerReport(final List<HMsg> outboundMessages)
throws IOException {
  this.serverInfo.setLoad(buildServerLoad());
  // Reset the per-interval request counter now that it is captured in load.
  this.requestCount.set(0);
  addOutboundMsgs(outboundMessages);
  HMsg [] msgs = null;
  while (!this.stopped) {
    try {
      msgs = this.hbaseMaster.regionServerReport(this.serverInfo,
        outboundMessages.toArray(HMsg.EMPTY_HMSG_ARRAY),
        getMostLoadedRegions());
      break;
    } catch (IOException ioe) {
      if (ioe instanceof RemoteException) {
        ioe = ((RemoteException)ioe).unwrapRemoteException();
      }
      if (ioe instanceof YouAreDeadException) {
        // This will be caught and handled as a fatal error in run()
        throw ioe;
      }
      LOG.warn("RemoteException connecting to master", ioe);
      // Couldn't connect to the master, get location from zk and reconnect
      // Method blocks until new master is found or we are stopped
      getMaster();
    }
  }
  updateOutboundMsgs(outboundMessages);
  outboundMessages.clear();

  for (int i = 0; !this.stopped && msgs != null && i < msgs.length; i++) {
    LOG.info(msgs[i].toString());
    // Intercept stop regionserver messages
    if (msgs[i].getType().equals(HMsg.Type.STOP_REGIONSERVER)) {
      stop("Received " + msgs[i]);
      continue;
    }
    LOG.warn("NOT PROCESSING " + msgs[i] + " -- WHY IS MASTER SENDING IT TO US?");
  }
  return outboundMessages;
}
4285+
4286+ private HServerLoad buildServerLoad() {
4287+ MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
4288+ HServerLoad hsl = new HServerLoad(requestCount.get(),
4289+ (int)(memory.getUsed() / 1024 / 1024),
4290+ (int) (memory.getMax() / 1024 / 1024));
4291+ for (HRegion r : this.onlineRegions.values()) {
4292+ hsl.addRegionInfo(createRegionLoad(r));
4293+ }
4294+ return hsl;
4295+ }
4296+
4297+ private void closeWAL(final boolean delete) {
4298+ try {
4299+ if (this.hlog != null) {
4300+ if (delete) {
4301+ hlog.closeAndDelete();
4302+ } else {
4303+ hlog.close();
4304+ }
4305+ }
4306+ } catch (Throwable e) {
4307+ LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
4308+ }
4309+ }
4310+
4311+ private void closeAllScanners() {
4312+ // Close any outstanding scanners. Means they'll get an UnknownScanner
4313+ // exception next time they come in.
4314+ for (Map.Entry<String, InternalScanner> e : this.scanners.entrySet()) {
4315+ try {
4316+ e.getValue().close();
4317+ } catch (IOException ioe) {
4318+ LOG.warn("Closing scanner " + e.getKey(), ioe);
4319+ }
4320+ }
4321+ }
4322+
4323+ /*
4324+ * Add to the passed <code>msgs</code> messages to pass to the master.
4325+ *
4326+ * @param msgs Current outboundMsgs array; we'll add messages to this List.
4327+ */
4328+ private void addOutboundMsgs(final List<HMsg> msgs) {
4329+ if (msgs.isEmpty()) {
4330+ this.outboundMsgs.drainTo(msgs);
4331+ return;
4332+ }
4333+ OUTER: for (HMsg m : this.outboundMsgs) {
4334+ for (HMsg mm : msgs) {
4335+ // Be careful don't add duplicates.
4336+ if (mm.equals(m)) {
4337+ continue OUTER;
4338+ }
4339+ }
4340+ msgs.add(m);
4341+ }
4342+ }
4343+
4344+ /*
4345+ * Remove from this.outboundMsgs those messages we sent the master.
4346+ *
4347+ * @param msgs Messages we sent the master.
4348+ */
4349+ private void updateOutboundMsgs(final List<HMsg> msgs) {
4350+ if (msgs.isEmpty()) {
4351+ return;
4352+ }
4353+ for (HMsg m : this.outboundMsgs) {
4354+ for (HMsg mm : msgs) {
4355+ if (mm.equals(m)) {
4356+ this.outboundMsgs.remove(m);
4357+ break;
4358+ }
4359+ }
4360+ }
4361+ }
4362+
4363+ /*
4364+ * Run init. Sets up hlog and starts up all server threads.
4365+ *
4366+ * @param c Extra configuration.
4367+ */
4368+ protected void handleReportForDutyResponse(final MapWritable c) throws IOException {
4369+ try {
4370+ for (Map.Entry<Writable, Writable> e : c.entrySet()) {
4371+
4372+ String key = e.getKey().toString();
4373+ // Use the address the master passed us
4374+ if (key.equals("hbase.regionserver.address")) {
4375+ HServerAddress hsa = (HServerAddress) e.getValue();
4376+ LOG.info("Master passed us address to use. Was="
4377+ + this.serverInfo.getServerAddress() + ", Now=" + hsa.toString());
4378+ this.serverInfo.setServerAddress(hsa);
4379+ continue;
4380+ }
4381+ String value = e.getValue().toString();
4382+ if (LOG.isDebugEnabled()) {
4383+ LOG.debug("Config from master: " + key + "=" + value);
4384+ }
4385+ this.conf.set(key, value);
4386+ }
4387+ // hack! Maps DFSClient => RegionServer for logs. HDFS made this
4388+ // config param for task trackers, but we can piggyback off of it.
4389+ if (this.conf.get("mapred.task.id") == null) {
4390+ this.conf.set("mapred.task.id",
4391+ "hb_rs_" + this.serverInfo.getServerName() + "_" +
4392+ System.currentTimeMillis());
4393+ }
4394+
4395+ // Master sent us hbase.rootdir to use. Should be fully qualified
4396+ // path with file system specification included. Set 'fs.defaultFS'
4397+ // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
4398+ // accessors will be going against wrong filesystem (unless all is set
4399+ // to defaults).
4400+ this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
4401+ // Get fs instance used by this RS
4402+ this.fs = FileSystem.get(this.conf);
4403+ this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
4404+ this.hlog = setupWALAndReplication();
4405+ // Init in here rather than in constructor after thread name has been set
4406+ this.metrics = new RegionServerMetrics();
4407+ startServiceThreads();
4408+ LOG.info("Serving as " + this.serverInfo.getServerName() +
4409+ ", RPC listening on " + this.server.getListenerAddress() +
4410+ ", sessionid=0x" +
4411+ Long.toHexString(this.zooKeeper.getZooKeeper().getSessionId()));
4412+ isOnline = true;
4413+ } catch (Throwable e) {
4414+ this.isOnline = false;
4415+ stop("Failed initialization");
4416+ throw convertThrowableToIOE(cleanup(e, "Failed init"),
4417+ "Region server startup failed");
4418+ }
4419+ }
4420+
4421+ /*
4422+ * @param r Region to get RegionLoad for.
4423+ *
4424+ * @return RegionLoad instance.
4425+ *
4426+ * @throws IOException
4427+ */
4428+ private HServerLoad.RegionLoad createRegionLoad(final HRegion r) {
4429+ byte[] name = r.getRegionName();
4430+ int stores = 0;
4431+ int storefiles = 0;
4432+ int storefileSizeMB = 0;
4433+ int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024);
4434+ int storefileIndexSizeMB = 0;
4435+ synchronized (r.stores) {
4436+ stores += r.stores.size();
4437+ for (Store store : r.stores.values()) {
4438+ storefiles += store.getStorefilesCount();
4439+ storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
4440+ storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
4441+ }
4442+ }
4443+ return new HServerLoad.RegionLoad(name, stores, storefiles,
4444+ storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB);
4445+ }
4446+
4447+ /**
4448+ * @param encodedRegionName
4449+ * @return An instance of RegionLoad.
4450+ * @throws IOException
4451+ */
4452+ public HServerLoad.RegionLoad createRegionLoad(final String encodedRegionName) {
4453+ HRegion r = null;
4454+ r = this.onlineRegions.get(encodedRegionName);
4455+ return r != null ? createRegionLoad(r) : null;
4456+ }
4457+
4458+ /*
4459+ * Cleanup after Throwable caught invoking method. Converts <code>t</code> to
4460+ * IOE if it isn't already.
4461+ *
4462+ * @param t Throwable
4463+ *
4464+ * @return Throwable converted to an IOE; methods can only let out IOEs.
4465+ */
4466+ private Throwable cleanup(final Throwable t) {
4467+ return cleanup(t, null);
4468+ }
4469+
4470+ /*
4471+ * Cleanup after Throwable caught invoking method. Converts <code>t</code> to
4472+ * IOE if it isn't already.
4473+ *
4474+ * @param t Throwable
4475+ *
4476+ * @param msg Message to log in error. Can be null.
4477+ *
4478+ * @return Throwable converted to an IOE; methods can only let out IOEs.
4479+ */
4480+ private Throwable cleanup(final Throwable t, final String msg) {
4481+ // Don't log as error if NSRE; NSRE is 'normal' operation.
4482+ if (t instanceof NotServingRegionException) {
4483+ LOG.debug("NotServingRegionException; " + t.getMessage());
4484+ return t;
4485+ }
4486+ if (msg == null) {
4487+ LOG.error("", RemoteExceptionHandler.checkThrowable(t));
4488+ } else {
4489+ LOG.error(msg, RemoteExceptionHandler.checkThrowable(t));
4490+ }
4491+ if (!checkOOME(t)) {
4492+ checkFileSystem();
4493+ }
4494+ return t;
4495+ }
4496+
4497+ /*
4498+ * @param t
4499+ *
4500+ * @return Make <code>t</code> an IOE if it isn't already.
4501+ */
4502+ private IOException convertThrowableToIOE(final Throwable t) {
4503+ return convertThrowableToIOE(t, null);
4504+ }
4505+
4506+ /*
4507+ * @param t
4508+ *
4509+ * @param msg Message to put in new IOE if passed <code>t</code> is not an IOE
4510+ *
4511+ * @return Make <code>t</code> an IOE if it isn't already.
4512+ */
4513+ private IOException convertThrowableToIOE(final Throwable t, final String msg) {
4514+ return (t instanceof IOException ? (IOException) t : msg == null
4515+ || msg.length() == 0 ? new IOException(t) : new IOException(msg, t));
4516+ }
4517+
4518+ /*
4519+ * Check if an OOME and if so, call abort.
4520+ *
4521+ * @param e
4522+ *
4523+ * @return True if we OOME'd and are aborting.
4524+ */
4525+ public boolean checkOOME(final Throwable e) {
4526+ boolean stop = false;
4527+ if (e instanceof OutOfMemoryError
4528+ || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError)
4529+ || (e.getMessage() != null && e.getMessage().contains(
4530+ "java.lang.OutOfMemoryError"))) {
4531+ abort("OutOfMemoryError, aborting", e);
4532+ stop = true;
4533+ }
4534+ return stop;
4535+ }
4536+
4537+ /**
4538+ * Checks to see if the file system is still accessible. If not, sets
4539+ * abortRequested and stopRequested
4540+ *
4541+ * @return false if file system is not available
4542+ */
4543+ protected boolean checkFileSystem() {
4544+ if (this.fsOk && this.fs != null) {
4545+ try {
4546+ FSUtils.checkFileSystemAvailable(this.fs);
4547+ } catch (IOException e) {
4548+ abort("File System not available", e);
4549+ this.fsOk = false;
4550+ }
4551+ }
4552+ return this.fsOk;
4553+ }
4554+
4555+ /*
4556+ * Inner class that runs on a long period checking if regions need major
4557+ * compaction.
4558+ */
4559+ private static class MajorCompactionChecker extends Chore {
4560+ private final HRegionServer instance;
4561+ private int majorCompactPriority;
4562+ private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE;
4563+
4564+
4565+ MajorCompactionChecker(final HRegionServer h, final int sleepTime,
4566+ final Stoppable stopper) {
4567+ super("MajorCompactionChecker", sleepTime, h);
4568+ this.instance = h;
4569+ LOG.info("Runs every " + sleepTime + "ms");
4570+
4571+ /*
4572+ * MajorCompactPriority is configurable.
4573+ * If not set, the compaction will use default priority.
4574+ */
4575+ majorCompactPriority = this.instance.conf.getInt(
4576+ "hbase.regionserver.compactionChecker.majorCompactPriority",
4577+ DEFAULT_PRIORITY);
4578+ }
4579+
4580+ @Override
4581+ protected void chore() {
4582+ for (HRegion r : this.instance.onlineRegions.values()) {
4583+ try {
4584+ if (r != null && r.isMajorCompaction()) {
4585+ // Queue a compaction. Will recognize if major is needed.
4586+ if(majorCompactPriority == DEFAULT_PRIORITY ||
4587+ majorCompactPriority > r.getCompactPriority()){
4588+ this.instance.compactSplitThread.requestCompaction(r, getName()
4589+ + " requests major compaction use default priority");
4590+ } else {
4591+ this.instance.compactSplitThread.requestCompaction(r, getName()
4592+ + " requests major compaction use configured priority",
4593+ this.majorCompactPriority);
4594+ }
4595+ }
4596+ } catch (IOException e) {
4597+ LOG.warn("Failed major compaction check on " + r, e);
4598+ }
4599+ }
4600+ }
4601+ }
4602+
4603+ /**
4604+ * Report the status of the server. A server is online once all the startup is
4605+ * completed (setting up filesystem, starting service threads, etc.). This
4606+ * method is designed mostly to be useful in tests.
4607+ *
4608+ * @return true if online, false if not.
4609+ */
4610+ public boolean isOnline() {
4611+ return isOnline;
4612+ }
4613+
4614+ /**
4615+ * Setup WAL log and replication if enabled.
4616+ * Replication setup is done in here because it wants to be hooked up to WAL.
4617+ * @return A WAL instance.
4618+ * @throws IOException
4619+ */
4620+ private HLog setupWALAndReplication() throws IOException {
4621+ final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
4622+ Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo));
4623+ if (LOG.isDebugEnabled()) {
4624+ LOG.debug("logdir=" + logdir);
4625+ }
4626+ if (this.fs.exists(logdir)) {
4627+ throw new RegionServerRunningException("Region server already "
4628+ + "running at " + this.serverInfo.getServerName()
4629+ + " because logdir " + logdir.toString() + " exists");
4630+ }
4631+
4632+ // Instantiate replication manager if replication enabled. Pass it the
4633+ // log directories.
4634+ try {
4635+ this.replicationHandler = Replication.isReplication(this.conf)?
4636+ new Replication(this, this.fs, logdir, oldLogDir): null;
4637+ } catch (KeeperException e) {
4638+ throw new IOException("Failed replication handler create", e);
4639+ }
4640+ return instantiateHLog(logdir, oldLogDir);
4641+ }
4642+
4643+ /**
4644+ * Called by {@link #setupWALAndReplication()} creating WAL instance.
4645+ * @param logdir
4646+ * @param oldLogDir
4647+ * @return WAL instance.
4648+ * @throws IOException
4649+ */
4650+ protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException {
4651+ return new HLog(this.fs, logdir, oldLogDir, this.conf,
4652+ getWALActionListeners(), this.serverInfo.getServerAddress().toString());
4653+ }
4654+
4655+ /**
4656+ * Called by {@link #instantiateHLog(Path, Path)} setting up WAL instance.
4657+ * Add any {@link WALObserver}s you want inserted before WAL startup.
4658+ * @return List of WALActionsListener that will be passed in to
4659+ * {@link HLog} on construction.
4660+ */
4661+ protected List<WALObserver> getWALActionListeners() {
4662+ List<WALObserver> listeners = new ArrayList<WALObserver>();
4663+ // Log roller.
4664+ this.hlogRoller = new LogRoller(this, this);
4665+ listeners.add(this.hlogRoller);
4666+ if (this.replicationHandler != null) {
4667+ // Replication handler is an implementation of WALActionsListener.
4668+ listeners.add(this.replicationHandler);
4669+ }
4670+ return listeners;
4671+ }
4672+
4673+ protected LogRoller getLogRoller() {
4674+ return hlogRoller;
4675+ }
4676+
4677+ /*
4678+ * @param interval Interval since last time metrics were called.
4679+ */
4680+ protected void doMetrics() {
4681+ try {
4682+ metrics();
4683+ } catch (Throwable e) {
4684+ LOG.warn("Failed metrics", e);
4685+ }
4686+ }
4687+
4688+ protected void metrics() {
4689+ int seconds = this.msgInterval / 1000;
4690+ if(0 == seconds){
4691+ seconds = 1;
4692+ }
4693+ this.metrics.regions.set(this.onlineRegions.size());
4694+ this.metrics.requests.set(this.requestCount.get()/seconds);
4695+ // Is this too expensive every three seconds getting a lock on onlineRegions
4696+ // and then per store carried? Can I make metrics be sloppier and avoid
4697+ // the synchronizations?
4698+ int stores = 0;
4699+ int storefiles = 0;
4700+ long memstoreSize = 0;
4701+ long storefileIndexSize = 0;
4702+ for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
4703+ HRegion r = e.getValue();
4704+ memstoreSize += r.memstoreSize.get();
4705+ synchronized (r.stores) {
4706+ stores += r.stores.size();
4707+ for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
4708+ Store store = ee.getValue();
4709+ storefiles += store.getStorefilesCount();
4710+ storefileIndexSize += store.getStorefilesIndexSize();
4711+ }
4712+ }
4713+ }
4714+ this.metrics.stores.set(stores);
4715+ this.metrics.storefiles.set(storefiles);
4716+ this.metrics.memstoreSizeMB.set((int) (memstoreSize / (1024 * 1024)));
4717+ this.metrics.storefileIndexSizeMB
4718+ .set((int) (storefileIndexSize / (1024 * 1024)));
4719+ this.metrics.compactionQueueSize.set(compactSplitThread
4720+ .getCompactionQueueSize());
4721+ this.metrics.flushQueueSize.set(cacheFlusher
4722+ .getFlushQueueSize());
4723+
4724+ LruBlockCache lruBlockCache = (LruBlockCache) StoreFile.getBlockCache(conf);
4725+ if (lruBlockCache != null) {
4726+ this.metrics.blockCacheCount.set(lruBlockCache.size());
4727+ this.metrics.blockCacheFree.set(lruBlockCache.getFreeSize());
4728+ this.metrics.blockCacheSize.set(lruBlockCache.getCurrentSize());
4729+ CacheStats cacheStats = lruBlockCache.getStats();
4730+ this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
4731+ this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
4732+ this.metrics.blockCacheEvictedCount.set(lruBlockCache.getEvictedCount());
4733+ double ratio = lruBlockCache.getStats().getHitRatio();
4734+ int percent = (int) (ratio * 100);
4735+ this.metrics.blockCacheHitRatio.set(percent);
4736+ ratio = lruBlockCache.getStats().getHitCachingRatio();
4737+ percent = (int) (ratio * 100);
4738+ this.metrics.blockCacheHitCachingRatio.set(percent);
4739+ }
4740+ }
4741+
4742+ /**
4743+ * @return Region server metrics instance.
4744+ */
4745+ public RegionServerMetrics getMetrics() {
4746+ return this.metrics;
4747+ }
4748+
4749+ /*
4750+ * Start maintenance Threads, Server, Worker and lease checker threads.
4751+ * Install an UncaughtExceptionHandler that calls abort of RegionServer if we
4752+ * get an unhandled exception. We cannot set the handler on all threads.
4753+ * Server's internal Listener thread is off limits. For Server, if an OOME, it
4754+ * waits a while then retries. Meantime, a flush or a compaction that tries to
4755+ * run should trigger same critical condition and the shutdown will run. On
4756+ * its way out, this server will shut down Server. Leases are sort of
4757+ * inbetween. It has an internal thread that while it inherits from Chore, it
4758+ * keeps its own internal stop mechanism so needs to be stopped by this
4759+ * hosting server. Worker logs the exception and exits.
4760+ */
4761+ private void startServiceThreads() throws IOException {
4762+ String n = Thread.currentThread().getName();
4763+ UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
4764+ public void uncaughtException(Thread t, Throwable e) {
4765+ abort("Uncaught exception in service thread " + t.getName(), e);
4766+ }
4767+ };
4768+
4769+ // Start executor services
4770+ this.service = new ExecutorService(getServerName());
4771+ this.service.startExecutorService(ExecutorType.RS_OPEN_REGION,
4772+ conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
4773+ this.service.startExecutorService(ExecutorType.RS_OPEN_ROOT,
4774+ conf.getInt("hbase.regionserver.executor.openroot.threads", 1));
4775+ this.service.startExecutorService(ExecutorType.RS_OPEN_META,
4776+ conf.getInt("hbase.regionserver.executor.openmeta.threads", 1));
4777+ this.service.startExecutorService(ExecutorType.RS_CLOSE_REGION,
4778+ conf.getInt("hbase.regionserver.executor.closeregion.threads", 3));
4779+ this.service.startExecutorService(ExecutorType.RS_CLOSE_ROOT,
4780+ conf.getInt("hbase.regionserver.executor.closeroot.threads", 1));
4781+ this.service.startExecutorService(ExecutorType.RS_CLOSE_META,
4782+ conf.getInt("hbase.regionserver.executor.closemeta.threads", 1));
4783+
4784+ Threads.setDaemonThreadRunning(this.hlogRoller, n + ".logRoller", handler);
4785+ Threads.setDaemonThreadRunning(this.cacheFlusher, n + ".cacheFlusher",
4786+ handler);
4787+ Threads.setDaemonThreadRunning(this.compactSplitThread, n + ".compactor",
4788+ handler);
4789+ Threads.setDaemonThreadRunning(this.majorCompactionChecker, n
4790+ + ".majorCompactionChecker", handler);
4791+
4792+ // Leases is not a Thread. Internally it runs a daemon thread. If it gets
4793+ // an unhandled exception, it will just exit.
4794+ this.leases.setName(n + ".leaseChecker");
4795+ this.leases.start();
4796+ // Put up info server.
4797+ int port = this.conf.getInt("hbase.regionserver.info.port", 60030);
4798+ // -1 is for disabling info server
4799+ if (port >= 0) {
4800+ String addr = this.conf.get("hbase.regionserver.info.bindAddress",
4801+ "0.0.0.0");
4802+ // check if auto port bind enabled
4803+ boolean auto = this.conf.getBoolean("hbase.regionserver.info.port.auto",
4804+ false);
4805+ while (true) {
4806+ try {
4807+ this.infoServer = new InfoServer(REGIONSERVER, addr, port, false);
4808+ this.infoServer.addServlet("status", "/rs-status", RSStatusServlet.class);
4809+ this.infoServer.addServlet("dump", "/dump", RSDumpServlet.class);
4810+ this.infoServer.setAttribute("regionserver", this);
4811+ this.infoServer.start();
4812+ break;
4813+ } catch (BindException e) {
4814+ if (!auto) {
4815+ // auto bind disabled throw BindException
4816+ throw e;
4817+ }
4818+ // auto bind enabled, try to use another port
4819+ LOG.info("Failed binding http info server to port: " + port);
4820+ port++;
4821+ // update HRS server info port.
4822+ this.serverInfo = new HServerInfo(this.serverInfo.getServerAddress(),
4823+ this.serverInfo.getStartCode(), port,
4824+ this.serverInfo.getHostname());
4825+ }
4826+ }
4827+ }
4828+
4829+ if (this.replicationHandler != null) {
4830+ this.replicationHandler.startReplicationServices();
4831+ }
4832+
4833+ // Start Server. This service is like leases in that it internally runs
4834+ // a thread.
4835+ this.server.start();
4836+
4837+ // Create the log splitting worker and start it
4838+ this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
4839+ this.getConfiguration(), this.getServerName().toString());
4840+ splitLogWorker.start();
4841+ }
4842+
4843+ /*
4844+ * Verify that server is healthy
4845+ */
4846+ private boolean isHealthy() {
4847+ if (!fsOk) {
4848+ // File system problem
4849+ return false;
4850+ }
4851+ // Verify that all threads are alive
4852+ if (!(leases.isAlive() && compactSplitThread.isAlive()
4853+ && cacheFlusher.isAlive() && hlogRoller.isAlive()
4854+ && this.majorCompactionChecker.isAlive())) {
4855+ stop("One or more threads are no longer alive -- stop");
4856+ return false;
4857+ }
4858+ return true;
4859+ }
4860+
4861+ @Override
4862+ public HLog getWAL() {
4863+ return this.hlog;
4864+ }
4865+
4866+ @Override
4867+ public CatalogTracker getCatalogTracker() {
4868+ return this.catalogTracker;
4869+ }
4870+
4871+ @Override
4872+ public void stop(final String msg) {
4873+ this.stopped = true;
4874+ LOG.info("STOPPED: " + msg);
4875+ synchronized (this) {
4876+ // Wakes run() if it is sleeping
4877+ notifyAll(); // FindBugs NN_NAKED_NOTIFY
4878+ }
4879+ }
4880+
4881+ @Override
4882+ public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
4883+ final boolean daughter)
4884+ throws KeeperException, IOException {
4885+ // Do checks to see if we need to compact (references or too many files)
4886+ if (r.hasReferences() || r.hasTooManyStoreFiles()) {
4887+ getCompactionRequester().requestCompaction(r,
4888+ r.hasReferences()? "Region has references on open" :
4889+ "Region has too many store files");
4890+ }
4891+
4892+ // Add to online regions if all above was successful.
4893+ addToOnlineRegions(r);
4894+
4895+ // Update ZK, ROOT or META
4896+ if (r.getRegionInfo().isRootRegion()) {
4897+ RootLocationEditor.setRootLocation(getZooKeeper(),
4898+ getServerInfo().getServerAddress());
4899+ } else if (r.getRegionInfo().isMetaRegion()) {
4900+ MetaEditor.updateMetaLocation(ct, r.getRegionInfo(), getServerInfo());
4901+ } else {
4902+ if (daughter) {
4903+ // If daughter of a split, update whole row, not just location.
4904+ MetaEditor.addDaughter(ct, r.getRegionInfo(), getServerInfo());
4905+ } else {
4906+ MetaEditor.updateRegionLocation(ct, r.getRegionInfo(), getServerInfo());
4907+ }
4908+ }
4909+ }
4910+
4911+ /**
4912+ * Return a reference to the metrics instance used for counting RPC calls.
4913+ * @return Metrics instance.
4914+ */
4915+ public HBaseRpcMetrics getRpcMetrics() {
4916+ return server.getRpcMetrics();
4917+ }
4918+
4919+ /**
4920+ * Cause the server to exit without closing the regions it is serving, the log
4921+ * it is using and without notifying the master. Used unit testing and on
4922+ * catastrophic events such as HDFS is yanked out from under hbase or we OOME.
4923+ *
4924+ * @param reason
4925+ * the reason we are aborting
4926+ * @param cause
4927+ * the exception that caused the abort, or null
4928+ */
4929+ public void abort(String reason, Throwable cause) {
4930+ String msg = "ABORTING region server " + this + ": " + reason;
4931+ if (cause != null) {
4932+ LOG.fatal(msg, cause);
4933+ } else {
4934+ LOG.fatal(msg);
4935+ }
4936+ this.abortRequested = true;
4937+ this.reservedSpace.clear();
4938+ if (this.metrics != null) {
4939+ LOG.info("Dump of metrics: " + this.metrics);
4940+ }
4941+ // Do our best to report our abort to the master, but this may not work
4942+ try {
4943+ if (cause != null) {
4944+ msg += "\nCause:\n" + StringUtils.stringifyException(cause);
4945+ }
4946+ if (hbaseMaster != null) {
4947+ hbaseMaster.reportRSFatalError(serverInfo, msg);
4948+ }
4949+ } catch (Throwable t) {
4950+ LOG.warn("Unable to report fatal error to master", t);
4951+ }
4952+ stop(reason);
4953+ }
4954+
4955+ /**
4956+ * @see HRegionServer#abort(String, Throwable)
4957+ */
4958+ public void abort(String reason) {
4959+ abort(reason, null);
4960+ }
4961+
4962+ /*
4963+ * Simulate a kill -9 of this server. Exits w/o closing regions or cleaning up
4964+ * logs but it does close socket in case want to bring up server on old
4965+ * hostname+port immediately.
4966+ */
4967+ protected void kill() {
4968+ this.killed = true;
4969+ abort("Simulated kill");
4970+ }
4971+
4972+ /**
4973+ * Wait on all threads to finish. Presumption is that all closes and stops
4974+ * have already been called.
4975+ */
4976+ protected void join() {
4977+ Threads.shutdown(this.majorCompactionChecker);
4978+ Threads.shutdown(this.cacheFlusher);
4979+ Threads.shutdown(this.compactSplitThread);
4980+ Threads.shutdown(this.hlogRoller);
4981+ this.service.shutdown();
4982+ if (this.replicationHandler != null) {
4983+ this.replicationHandler.join();
4984+ }
4985+ }
4986+
4987+ /**
4988+ * Get the current master from ZooKeeper and open the RPC connection to it.
4989+ *
4990+ * Method will block until a master is available. You can break from this
4991+ * block by requesting the server stop.
4992+ *
4993+ * @return master address, or null if server has been stopped
4994+ */
4995+ private HServerAddress getMaster() {
4996+ HServerAddress masterAddress = null;
4997+ HMasterRegionInterface master = null;
4998+
4999+ while (!stopped && master == null) {
5000+
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches