geode-commits mailing list archives

From jensde...@apache.org
Subject [44/50] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17
Date Thu, 25 Feb 2016 20:27:32 GMT
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c01d5f4/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
----------------------------------------------------------------------
diff --cc geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
index 0000000,312472b..9ea8459
mode 000000,100644..100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/security/AuthorizeRequest.java
@@@ -1,0 -1,757 +1,769 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package com.gemstone.gemfire.internal.security;
+ 
+ import java.io.Serializable;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.security.Principal;
+ import java.util.Collection;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ import com.gemstone.gemfire.cache.Cache;
+ import com.gemstone.gemfire.cache.InterestResultPolicy;
+ import com.gemstone.gemfire.cache.operations.*;
+ import com.gemstone.gemfire.cache.operations.internal.GetOperationContextImpl;
+ import com.gemstone.gemfire.distributed.DistributedMember;
+ import com.gemstone.gemfire.i18n.LogWriterI18n;
+ import com.gemstone.gemfire.internal.ClassLoadUtil;
+ import com.gemstone.gemfire.internal.cache.operations.ContainsKeyOperationContext;
+ import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
+ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+ import com.gemstone.gemfire.security.AccessControl;
+ import com.gemstone.gemfire.security.NotAuthorizedException;
+ 
+ /**
+  * This class implements authorization calls for various operations. It provides
+  * methods to invoke the authorization callback ({@link AccessControl#authorizeOperation})
+  * before the actual operation is performed (pre-processing); the callback may
+  * modify the arguments to the operation. The data being passed for the
+  * operation is encapsulated in an {@link OperationContext} object that can be
+  * modified by the pre-processing authorization callbacks.
+  * 
+  * @author Sumedh Wale
+  * @since 5.5
+  */
+ public class AuthorizeRequest {
+ 
+   private AccessControl authzCallback;
+ 
+   private final Principal principal;
+ 
+   private boolean isPrincipalSerializable;
+ 
+   private ClientProxyMembershipID id; 
+ 
+   private final LogWriterI18n logger;
+ 
+   public AuthorizeRequest(String authzFactoryName, DistributedMember dm,
+       Principal principal, Cache cache) throws ClassNotFoundException,
+       NoSuchMethodException, IllegalAccessException, InvocationTargetException,
+       NotAuthorizedException {
+ 
+     this.principal = principal;
+     if (this.principal instanceof Serializable) {
+       this.isPrincipalSerializable = true;
+     }
+     else {
+       this.isPrincipalSerializable = false;
+     }
+ 
+     this.logger = cache.getSecurityLoggerI18n();
+     Method authzMethod = ClassLoadUtil.methodFromName(authzFactoryName);
+     this.authzCallback = (AccessControl)authzMethod
+         .invoke(null, (Object[])null);
+     this.authzCallback.init(principal, dm, cache);  
+     this.id = null;
+   }
+   
+   public AuthorizeRequest(String authzFactoryName, ClientProxyMembershipID id,
+       Principal principal, Cache cache) throws ClassNotFoundException,
+       NoSuchMethodException, IllegalAccessException, InvocationTargetException,
+       NotAuthorizedException {
+     this(authzFactoryName, id.getDistributedMember(), principal, cache);
+     this.id = id; 
+     if (this.logger.infoEnabled()) {
+       this.logger.info(
+         LocalizedStrings.AuthorizeRequest_AUTHORIZEREQUEST_CLIENT_0_IS_SETTING_AUTHORIZATION_CALLBACK_TO_1,
+         new Object[] {id, authzFactoryName});
+     }
+   }
+  
+   public AccessControl getAuthzCallback() {
+ 
+     return this.authzCallback;
+   }
+ 
++  public Principal getPrincipal() {
++    return principal;
++  }
++
++  public boolean isPrincipalSerializable() {
++    return isPrincipalSerializable;
++  }
++
++  public LogWriterI18n getLogger() {
++    return logger;
++  }
++  
+   public GetOperationContext getAuthorize(String regionName, Object key,
+       Object callbackArg) throws NotAuthorizedException {
+ 
+     GetOperationContext getContext = new GetOperationContextImpl(key, false);
+     getContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, getContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_GET_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       if (this.logger.fineEnabled()) {
+         this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr}); 
+       }
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform GET operation on region [" + regionName
+             + ']');
+       }
+     }
+     return getContext;
+   }
+ 
+   public PutOperationContext putAuthorize(String regionName, Object key,
+       Object value, boolean isObject, Object callbackArg)
+       throws NotAuthorizedException {
+ 
+     return putAuthorize(regionName, key, value, isObject,
+         callbackArg, PutOperationContext.UNKNOWN);
+   }
+ 
+   public PutOperationContext putAuthorize(String regionName, Object key,
+       Object value, boolean isObject, Object callbackArg, byte opType)
+       throws NotAuthorizedException {
+     
+     PutOperationContext putContext = new PutOperationContext(key,
+         value, isObject, opType, false);    
+     putContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, putContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_PUT_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform PUT operation on region [" + regionName
+             + ']');
+       }
+     }
+     return putContext;
+   }
+ 
+   public PutAllOperationContext putAllAuthorize(String regionName, Map map, Object callbackArg)
+   throws NotAuthorizedException {
+     PutAllOperationContext putAllContext = new PutAllOperationContext(map);
+     putAllContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, putAllContext)) {
+       final String errStr = 
+         LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_PUTALL_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       if (this.logger.warningEnabled()) {
+         this.logger.warning(LocalizedStrings.TWO_ARG_COLON, new Object[] {this, errStr});
+       }
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       } else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform PUTALL operation on region [" + regionName
+             + ']');
+       }
+       
+       // Now that PUTALL itself has been authorized, we could also verify that
+       // each <key,value> pair is authorized for PUT.
+       /* According to Jags and Suds, we will not authorize PUT for PUTALL for now.
+        * We will only do authorization once per operation, i.e. PUTALL only.
+       Collection entries = map.entrySet();
+       Iterator iterator = entries.iterator();
+       Map.Entry mapEntry = null;
+       while (iterator.hasNext()) {
+         mapEntry = (Map.Entry)iterator.next();
+         String currkey = (String)mapEntry.getKey();
+         Object value = mapEntry.getValue();
+         boolean isObject = true;
+         if (value instanceof byte[]) {
+           isObject = false;
+         }
+         byte[] serializedValue = ((CachedDeserializable)value).getSerializedValue();
+ 
+         PutOperationContext putContext = new PutOperationContext(currkey,
+             serializedValue, isObject, PutOperationContext.UNKNOWN, false);
+         putContext.setCallbackArg(null);
+         if (!this.authzCallback.authorizeOperation(regionName, putContext)) {
+           String errStr = "Not authorized to perform PUT operation on region ["
+               + regionName + ']' + " for key "+currkey +". PUTALL is not authorized either.";
+           if (this.logger.warningEnabled()) {
+             this.logger.warning(toString() + ": " + errStr);
+           }
+           if (this.isPrincipalSerializable) {
+             throw new NotAuthorizedException(errStr, this.principal);
+           }
+           else {
+             throw new NotAuthorizedException(errStr);
+           }
+         } else {
+           if (this.logger.finestEnabled()) {
+             this.logger.finest(toString()
+                 + ": PUT is authorized in PUTALL for "+currkey+" isObject("+isObject+") on region ["
+                 + regionName + ']');
+           }
+         }
+       } // while iterating map
+       */
+     }
+     return putAllContext;
+   }
+   
+   public RemoveAllOperationContext removeAllAuthorize(String regionName, Collection<?> keys, Object callbackArg)
+   throws NotAuthorizedException {
+     RemoveAllOperationContext removeAllContext = new RemoveAllOperationContext(keys);
+     removeAllContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, removeAllContext)) {
+       final String errStr = 
+         LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_REMOVEALL_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       if (this.logger.warningEnabled()) {
+         this.logger.warning(LocalizedStrings.TWO_ARG_COLON, new Object[] {this, errStr});
+       }
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       } else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform removeAll operation on region [" + regionName
+             + ']');
+       }
+     }
+     return removeAllContext;
+   }
+   public DestroyOperationContext destroyAuthorize(String regionName,
+       Object key, Object callbackArg) throws NotAuthorizedException {
+ 
+     DestroyOperationContext destroyEntryContext = new DestroyOperationContext(
+         key);
+     destroyEntryContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, destroyEntryContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_DESTROY_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform DESTROY operation on region ["
+             + regionName + ']');
+       }
+     }
+     return destroyEntryContext;
+   }
+ 
+   public QueryOperationContext queryAuthorize(String queryString,
+       Set regionNames) throws NotAuthorizedException {
+     return queryAuthorize(queryString, regionNames, null);
+   }
+   
+   public QueryOperationContext queryAuthorize(String queryString,
+       Set regionNames, Object[] queryParams) throws NotAuthorizedException {
+ 
+     if (regionNames == null) {
+       regionNames = new HashSet();
+     }
+     QueryOperationContext queryContext = new QueryOperationContext(queryString,
+         regionNames, false, queryParams);
+     if (!this.authzCallback.authorizeOperation(null, queryContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFOM_QUERY_OPERATION_0_ON_THE_CACHE.toLocalizedString(queryString);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform QUERY operation [" + queryString
+             + "] on cache");
+       }
+     }
+     return queryContext;
+   }
+ 
+   public ExecuteCQOperationContext executeCQAuthorize(String cqName,
+       String queryString, Set regionNames) throws NotAuthorizedException {
+ 
+     if (regionNames == null) {
+       regionNames = new HashSet();
+     }
+     ExecuteCQOperationContext executeCQContext = new ExecuteCQOperationContext(
+         cqName, queryString, regionNames, false);
+     if (!this.authzCallback.authorizeOperation(null, executeCQContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFOM_EXECUTE_CQ_OPERATION_0_ON_THE_CACHE.toLocalizedString(queryString);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform EXECUTE_CQ operation [" + queryString
+             + "] on cache");
+       }
+     }
+     return executeCQContext;
+   }
+ 
+   public void stopCQAuthorize(String cqName, String queryString, Set regionNames)
+       throws NotAuthorizedException {
+ 
+     StopCQOperationContext stopCQContext = new StopCQOperationContext(cqName,
+         queryString, regionNames);
+     if (!this.authzCallback.authorizeOperation(null, stopCQContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFOM_STOP_CQ_OPERATION_0_ON_THE_CACHE.toLocalizedString(cqName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform STOP_CQ operation [" + cqName + ','
+             + queryString + "] on cache");
+       }
+     }
+   }
+ 
+   public void closeCQAuthorize(String cqName, String queryString,
+       Set regionNames) throws NotAuthorizedException {
+ 
+     CloseCQOperationContext closeCQContext = new CloseCQOperationContext(
+         cqName, queryString, regionNames);
+     if (!this.authzCallback.authorizeOperation(null, closeCQContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFOM_CLOSE_CQ_OPERATION_0_ON_THE_CACHE.toLocalizedString(cqName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform CLOSE_CQ operation [" + cqName + ','
+             + queryString + "] on cache");
+       }
+     }
+   }
+ 
+   public void getDurableCQsAuthorize()
+       throws NotAuthorizedException {
+ 
+     GetDurableCQsOperationContext getDurableCQsContext = new GetDurableCQsOperationContext();
+     if (!this.authzCallback.authorizeOperation(null, getDurableCQsContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_GET_DURABLE_CQS_OPERATION_0_ON_THE_CACHE.toLocalizedString();
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform GET_DURABLE_CQS operation on cache");
+       }
+     }
+   }
+   
+   public RegionClearOperationContext clearAuthorize(String regionName,
+       Object callbackArg) throws NotAuthorizedException {
+ 
+     RegionClearOperationContext regionClearContext = new RegionClearOperationContext(
+         false);
+     regionClearContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, regionClearContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_REGION_CLEAR_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform REGION_CLEAR operation on region ["
+             + regionName + ']');
+       }
+     }
+     return regionClearContext;
+   }
+ 
+   public RegisterInterestOperationContext registerInterestAuthorize(
+       String regionName, Object key, int interestType,
+       InterestResultPolicy policy) throws NotAuthorizedException {
+ 
+     RegisterInterestOperationContext registerInterestContext = new RegisterInterestOperationContext(
+         key, InterestType.fromOrdinal((byte)interestType), policy);
+     if (!this.authzCallback.authorizeOperation(regionName,
+         registerInterestContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_REGISTER_INTEREST_OPERATION_FOR_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger
+             .finest(toString()
+                 + ": Authorized to perform REGISTER_INTEREST operation for region ["
+                 + regionName + ']');
+       }
+     }
+     return registerInterestContext;
+   }
+ 
+   public RegisterInterestOperationContext registerInterestListAuthorize(
+       String regionName, List keys, InterestResultPolicy policy)
+       throws NotAuthorizedException {
+ 
+     RegisterInterestOperationContext registerInterestListContext;
+     registerInterestListContext = new RegisterInterestOperationContext(keys,
+         InterestType.LIST, policy);
+     if (!this.authzCallback.authorizeOperation(regionName,
+         registerInterestListContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_REGISTER_INTEREST_LIST_OPERATION_FOR_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger
+             .finest(toString()
+                 + ": Authorized to perform REGISTER_INTEREST_LIST operation for region ["
+                 + regionName + ']');
+       }
+     }
+     return registerInterestListContext;
+   }
+ 
+   public UnregisterInterestOperationContext unregisterInterestAuthorize(
+       String regionName, Object key, int interestType)
+       throws NotAuthorizedException {
+ 
+     UnregisterInterestOperationContext unregisterInterestContext;
+     unregisterInterestContext = new UnregisterInterestOperationContext(key,
+         InterestType.fromOrdinal((byte)interestType));
+     if (!this.authzCallback.authorizeOperation(regionName,
+         unregisterInterestContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_UNREGISTER_INTEREST_OPERATION_FOR_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform DESTROY operation on region ["
+             + regionName + ']');
+       }
+     }
+     return unregisterInterestContext;
+   }
+ 
+   public UnregisterInterestOperationContext unregisterInterestListAuthorize(
+       String regionName, List keys) throws NotAuthorizedException {
+ 
+     UnregisterInterestOperationContext unregisterInterestListContext;
+     unregisterInterestListContext = new UnregisterInterestOperationContext(
+         keys, InterestType.LIST);
+     if (!this.authzCallback.authorizeOperation(regionName,
+         unregisterInterestListContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_UNREGISTER_INTEREST_LIST_OPERATION_FOR_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger
+             .finest(toString()
+                 + ": Authorized to perform UNREGISTER_INTEREST_LIST operation for region ["
+                 + regionName + ']');
+       }
+     }
+     return unregisterInterestListContext;
+   }
+ 
+   public KeySetOperationContext keySetAuthorize(String regionName)
+       throws NotAuthorizedException {
+ 
+     KeySetOperationContext keySetContext = new KeySetOperationContext(false);
+     if (!this.authzCallback.authorizeOperation(regionName, keySetContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_KEY_SET_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform KEY_SET operation on region ["
+             + regionName + ']');
+       }
+     }
+     return keySetContext;
+   }
+ 
+   public void containsKeyAuthorize(String regionName, Object key)
+       throws NotAuthorizedException {
+ 
+     ContainsKeyOperationContext containsKeyContext = new ContainsKeyOperationContext(
+         key);
+     if (!this.authzCallback.authorizeOperation(regionName, containsKeyContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_CONTAINS_KEY_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform CONTAINS_KEY operation on region ["
+             + regionName + ']');
+       }
+     }
+   }
+ 
+   public void createRegionAuthorize(String regionName)
+       throws NotAuthorizedException {
+ 
+     RegionCreateOperationContext regionCreateContext = new RegionCreateOperationContext(
+         false);
+     if (!this.authzCallback.authorizeOperation(regionName, regionCreateContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_CREATE_REGION_OPERATION_FOR_THE_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform REGION_CREATE operation of region ["
+             + regionName + ']');
+       }
+     }
+   }
+ 
+   public RegionDestroyOperationContext destroyRegionAuthorize(
+       String regionName, Object callbackArg) throws NotAuthorizedException {
+ 
+     RegionDestroyOperationContext regionDestroyContext = new RegionDestroyOperationContext(
+         false);
+     regionDestroyContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback
+         .authorizeOperation(regionName, regionDestroyContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_REGION_DESTROY_OPERATION_FOR_THE_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform REGION_DESTROY operation for region ["
+             + regionName + ']');
+       }
+     }
+     return regionDestroyContext;
+   }
+   
+   public ExecuteFunctionOperationContext executeFunctionAuthorize(
+       String functionName, String region, Set keySet, Object arguments, boolean optimizeForWrite)
+       throws NotAuthorizedException {
+     ExecuteFunctionOperationContext executeContext = new ExecuteFunctionOperationContext(
+         functionName, region, keySet, arguments, optimizeForWrite, false);
+     if (!this.authzCallback.authorizeOperation(region, executeContext)) {
+       final String errStr = LocalizedStrings.
+         AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_EXECUTE_REGION_FUNCTION_OPERATION
+           .toLocalizedString();
+       if (this.logger.warningEnabled()) {
+         this.logger.warning(LocalizedStrings.TWO_ARG_COLON,
+             new Object[] {this, errStr});
+       }
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform EXECUTE_REGION_FUNCTION operation ");
+       }
+     }
+     return executeContext;
+   }
+ 
+   public InvalidateOperationContext invalidateAuthorize(String regionName,
+       Object key, Object callbackArg) throws NotAuthorizedException {
+ 
+     InvalidateOperationContext invalidateEntryContext = new InvalidateOperationContext(
+         key);
+     invalidateEntryContext.setCallbackArg(callbackArg);
+     if (!this.authzCallback.authorizeOperation(regionName, invalidateEntryContext)) {
+       String errStr = LocalizedStrings.AuthorizeRequest_NOT_AUTHORIZED_TO_PERFORM_INVALIDATE_OPERATION_ON_REGION_0.toLocalizedString(regionName);
+       this.logger.warning(
+           LocalizedStrings.TWO_ARG_COLON,
+           new Object[] {this, errStr});
+       if (this.isPrincipalSerializable) {
+         throw new NotAuthorizedException(errStr, this.principal);
+       }
+       else {
+         throw new NotAuthorizedException(errStr);
+       }
+     }
+     else {
+       if (this.logger.finestEnabled()) {
+         this.logger.finest(toString()
+             + ": Authorized to perform INVALIDATE operation on region ["
+             + regionName + ']');
+       }
+     }
+     return invalidateEntryContext;
+   }
+ 
+   public void close() {
+ 
+     this.authzCallback.close();
+   }
+ 
+   @Override
+   public String toString() {
+     return (this.id == null ? "ClientProxyMembershipID not available" : id.toString()) + ",Principal:"
+     + (this.principal == null ? "" : this.principal.getName());
+   }
+ 
+ }
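
For readers following the authorization flow above: the constructor resolves authzFactoryName to a zero-argument static factory method, invokes it to obtain an AccessControl, then calls init(principal, member, cache) once and authorizeOperation(regionName, context) before each client operation. The following is only an illustrative sketch of such a callback (the class name and its toy policy are hypothetical, and it assumes the AccessControl and OperationContext signatures used by the code above):

package com.example.security;   // hypothetical package, not part of this commit

import java.security.Principal;

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.operations.GetOperationContext;
import com.gemstone.gemfire.cache.operations.OperationContext;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.security.AccessControl;
import com.gemstone.gemfire.security.NotAuthorizedException;

public class GetOnlyAccessControl implements AccessControl {

  private Principal principal;

  // Static factory: AuthorizeRequest resolves this method from authzFactoryName
  // via ClassLoadUtil.methodFromName(...) and invokes it with no arguments.
  public static AccessControl create() {
    return new GetOnlyAccessControl();
  }

  public void init(Principal principal, DistributedMember remoteMember,
      Cache cache) throws NotAuthorizedException {
    this.principal = principal;   // remember who we are authorizing
  }

  public boolean authorizeOperation(String regionName, OperationContext context) {
    // Toy policy for illustration: only GET operations are allowed; every other
    // context (put, destroy, query, ...) is rejected, which makes AuthorizeRequest
    // throw NotAuthorizedException for that operation.
    return context instanceof GetOperationContext;
  }

  public void close() {
    this.principal = null;
  }
}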

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c01d5f4/geode-core/src/main/java/com/gemstone/gemfire/management/CacheServerMXBean.java
----------------------------------------------------------------------
diff --cc geode-core/src/main/java/com/gemstone/gemfire/management/CacheServerMXBean.java
index 0000000,62aedbd..7ab5072
mode 000000,100644..100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/CacheServerMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/CacheServerMXBean.java
@@@ -1,0 -1,397 +1,405 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management;
+ 
+ import com.gemstone.gemfire.cache.server.CacheServer;
++import com.gemstone.gemfire.management.internal.security.Resource;
++import com.gemstone.gemfire.management.internal.security.ResourceConstants;
++import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+ 
+ /**
+  * MBean that provides access to information and management functionality for a
+  * {@link CacheServer}.
+  * 
+  * <p>There will be one CacheServerMXBean per {@link CacheServer} started in a GemFire node.
+  * 
+  * <p> ObjectName for this MBean is GemFire:service=CacheServer,port={0},type=Member,member={1}
+  * <p>
+  * <table border="1">
+  * <tr>
+  * <th>Notification Type</th>
+  * <th>Notification Source</th>
+  * <th>Message</th>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cacheserver.client.joined</td>
+  * <td>CacheServer MBean Name</td>
+  * <td>Client joined with Id &ltClient ID&gt</td>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cacheserver.client.left</td>
+  * <td>CacheServer MBean Name</td>
+  * <td>Client left with Id &ltClient ID&gt</td>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cacheserver.client.crashed</td>
+  * <td>CacheServer MBean Name</td>
+  * <td>Client crashed with Id &ltClient ID&gt</td>
+  * </tr>
+  * </table>
+  * 
+  * @author rishim
+  * @since 7.0
+  * 
+  */
+ public interface CacheServerMXBean {
+ 
+   /**
+    * Returns the port on which this CacheServer listens for clients.
+    */
+   public int getPort();
+ 
+   /**
+    * Returns a string representing the IP address or host name that this
+    * CacheServer will listen on.
+    */
+   public String getBindAddress();
+   
+   /**
+    * Returns the configured buffer size of the socket connection for this CacheServer.
+    */
+   public int getSocketBufferSize();
+ 
+   /**
+    * Returns the maximum amount of time between client pings. This value is used to determine the 
+    * health of clients attached to the server.
+    */
+   public int getMaximumTimeBetweenPings();
+   
+   /**
+    * Returns the maximum allowed client connections.
+    */
+   public int getMaxConnections();
+ 
+   /**
+    * Returns the maximum number of threads allowed in this CacheServer to service client requests.
+    */
+   public int getMaxThreads();
+   
+   /**
+    * Returns the maximum number of messages that can be enqueued in a client-queue.
+    */
+   public int getMaximumMessageCount();
+ 
+   /**
+    * Returns the time (in seconds) after which a message in the client queue will expire.
+    */
+   public int getMessageTimeToLive();
+   
+   /**
+    * Returns the frequency (in milliseconds) to poll the load probe on this CacheServer.
+    */
+   public long getLoadPollInterval();
+   
+   /**
+    * Returns the name or IP address to pass to the client as the location
+    * where the server is listening. When the server connects to the locator it tells
+    * the locator the host and port where it is listening for client connections. If
+    * the host the server uses by default is one that the client can’t translate into
+    * an IP address, the client will have no route to the server’s host and won’t be
+    * able to find the server. For this situation, you must supply the server’s
+    * alternate hostname for the locator to pass to the client.
+    */
+   public String getHostNameForClients();
+   
+   /**
+    * Returns the load probe for this CacheServer.
+    */
+   public ServerLoadData fetchLoadProbe();
+ 
+   /**
+    * Returns whether or not this CacheServer is running.
+    * 
+    * @return True if the server is running, false otherwise.
+    */
+   public boolean isRunning();
+ 
+   /**
+    * Returns the capacity (in megabytes) of the client queue.
+    */
+   public int getCapacity();
+ 
+   /**
+    * Returns the eviction policy that is executed when the capacity of the client
+    * queue is reached.
+    */
+   public String getEvictionPolicy();
+ 
+   /**
+    * Returns the name of the disk store that is used for persistence.
+    */
+   public String getDiskStoreName();
+ 
+   /**
+    * Returns the number of sockets accepted and used for client to server messaging.
+    */
+   public int getClientConnectionCount();
+   
+   /**
+    * Returns the number of client virtual machines connected.
+    */
+   public int getCurrentClients();
+ 
+   /**
+    * Returns the average get request latency.
+    */
+   public long getGetRequestAvgLatency();
+ 
+   /**
+    * Returns the average put request latency.
+    */
+   public long getPutRequestAvgLatency();
+ 
+   /**
+    * Returns the total number of client connections that timed out and were
+    * closed.
+    */
+   public int getTotalConnectionsTimedOut();
+ 
+   /**
+    * Returns the total number of client connection requests that failed.
+    */
+   public int getTotalFailedConnectionAttempts();
+ 
+   /**
+    * Returns the current number of connections waiting for a thread to start
+    * processing their message.
+    */
+   public int getThreadQueueSize();
+ 
+   /**
+    * Returns the current number of threads handling a client connection.
+    */
+   public int getConnectionThreads();
+ 
+   /**
+    * Returns the load from client to server connections as reported by the load
+    * probe installed in this server.
+    */
+   public double getConnectionLoad();
+ 
+   /**
+    * Returns the estimate of how much load is added for each new connection as
+    * reported by the load probe installed in this server.
+    */
+   public double getLoadPerConnection();
+ 
+   /**
+    * Returns the load from queues as reported by the load probe installed in
+    * this server.
+    */
+   public double getQueueLoad();
+ 
+   /**
+    * Returns the estimate of how much load is added for each new queue as
+    * reported by the load probe installed in this server.
+    */
+   public double getLoadPerQueue();
+ 
+ 
+   /**
+    * Returns the rate of get requests.
+    */
+   public float getGetRequestRate();
+ 
+   /**
+    * Returns the rate of put requests.
+    */
+   public float getPutRequestRate();
+   
+   /**
+    * Returns the total number of bytes sent to clients.
+    */
+   public long getTotalSentBytes();
+ 
+   /**
+    * Returns the total number of bytes received from clients.
+    */
+   public long getTotalReceivedBytes();
+   
+   /**
+    * Returns the number of cache client notification requests.
+    */
+   public int getNumClientNotificationRequests();
+ 
+   /**
+    * Returns the average latency for processing client notifications.
+    */
+   public long getClientNotificationAvgLatency();
+ 
+   /**
+    * Returns the rate of client notifications.
+    */
+   public float getClientNotificationRate();
+ 
+   /**
+    * Returns the number of registered CQs.
+    */
+   public long getRegisteredCQCount();
+ 
+   /**
+    * Returns the number of active (currently executing) CQs.
+    */
+   public long getActiveCQCount();
+ 
+   /**
+    * Returns the rate of queries.
+    */
+   public float getQueryRequestRate();
+ 
+   /**
+    * Returns the total number of indexes in use by the member.
+    */
+   public int getIndexCount();
+ 
+   /**
+    * Returns a list of names for all indexes.
+    */
+   public String[] getIndexList();
+ 
+   /**
+    * Returns the total time spent updating indexes due to changes in the data.
+    */
+   public long getTotalIndexMaintenanceTime();
+ 
+   /**
+    * Remove an index.
+    * 
+    * @param indexName
+    *          Name of the index to be removed.
+    */
++  @ResourceOperation(resource=Resource.REGION, operation=ResourceConstants.DESTROY_INDEX)
+   public void removeIndex(String indexName) throws Exception;
+ 
+   /**
+    * Returns a list of names for all registered CQs.
+    */
+   public String[] getContinuousQueryList();
+ 
+   /**
+    * Execute an ad-hoc CQ on the server
+    * 
+    * @param queryName
+    *          Name of the CQ to execute.
+    * @deprecated This method is dangerous because it only modifies the
+    * target cache server - other copies of the CQ on other servers are
+    * not affected. Use the client side CQ methods to modify a CQ instead.
+    */
 -  @Deprecated 
++  @Deprecated
++  @ResourceOperation(resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERY)
+   public void executeContinuousQuery(String queryName) throws Exception;
+ 
+   /**
+    * Stop (pause) a CQ from executing
+    * 
+    * @param queryName
+    *          Name of the CQ to stop.
+    *          
+    * @deprecated This method is dangerous because it only modifies the
+    * target cache server - other copies of the CQ on other servers are
+    * not affected. Use the client side CQ methods to modify a CQ instead.
+    */
+   @Deprecated
++  @ResourceOperation(resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.STOP_CONTINUOUS_QUERY)
+   public void stopContinuousQuery(String queryName) throws Exception;
+ 
+   /**
+    * Unregister all CQs from a region
+    * 
+    * @param regionName
+    *          Name of the region from which to remove CQs.
+    * @deprecated This method is dangerous because it only modifies the
+    * target cache server - other copies of the CQ on other servers are
+    * not affected. Use the client side CQ methods to modify a CQ instead.
+    */
+   @Deprecated
++  @ResourceOperation(resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.STOP_CONTINUOUS_QUERY)
+   public void closeAllContinuousQuery(String regionName) throws Exception;
+   
+   
+   /**
+    * Unregister a CQ
+    * 
+    * @param queryName
+    *          Name of the CQ to unregister.
+    * @deprecated This method is dangerous because it only modifies the
+    * target cache server - other copies of the CQ on other servers are
+    * not affected. Use the client side CQ methods to modify a CQ instead.
+    */
+   @Deprecated
++  @ResourceOperation(resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.STOP_CONTINUOUS_QUERY)
+   public void closeContinuousQuery(String queryName) throws Exception;
+ 
+ 
+   /**
+    * Returns a list of IDs for all connected clients.
+    * 
+    * @return A list of IDs or a length 0 array if no clients are registered.
+    */
+   public String[] getClientIds() throws Exception;
+ 
+   /**
+    * Returns health and statistics information for the given client ID. Some of the
+    * information (CPUs, NumOfCacheListenerCalls, NumOfGets, NumOfMisses,
+    * NumOfPuts, NumOfThreads, ProcessCpuTime) is only available for clients that
+    * have set a "StatisticsInterval".
+    * 
+    * @param clientId
+    *          ID of the client for which to retrieve information.
+    */
+   public ClientHealthStatus showClientStats(String clientId) throws Exception;
+   
+   /**
+    * Returns the number of clients who have existing subscriptions.
+    */
+   public int getNumSubscriptions();
+ 
+   /**
+    * Returns health and statistics information for all clients. Some of the
+    * information (CPUs, NumOfCacheListenerCalls, NumOfGets, NumOfMisses,
+    * NumOfPuts, NumOfThreads, ProcessCpuTime) is only available for clients that
+    * have set a "StatisticsInterval".
+    */
+   public ClientHealthStatus[] showAllClientStats() throws Exception;
+   
+   /**
+    * Shows a list of clients with their queue statistics. The client queue
+    * statistics shown by this method are:
+    * 
+    * eventsEnqued, eventsRemoved, eventsConflated, markerEventsConflated,
+    * eventsExpired, eventsRemovedByQrm, eventsTaken, numVoidRemovals
+    * 
+    * @return an array of ClientQueueDetail
+    * @throws Exception
+    */
+   public ClientQueueDetail[] showClientQueueDetails() throws Exception;
+   
+   /**
+    * 
+    * Shows queue statistics for the given client. The client queue statistics
+    * shown by this method are:
+    * 
+    * eventsEnqued, eventsRemoved, eventsConflated, markerEventsConflated,
+    * eventsExpired, eventsRemovedByQrm, eventsTaken, numVoidRemovals
+    * 
+    * @param clientId the ID of a client, as returned by the ClientIds attribute
+    * @return ClientQueueDetail
+    * @throws Exception
+    */
+   public ClientQueueDetail showClientQueueDetails(String clientId) throws Exception;
+ 
+ }
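
As a usage illustration only (not part of this commit): the ObjectName pattern quoted in the interface javadoc, GemFire:service=CacheServer,port={0},type=Member,member={1}, can be combined with the standard JMX remote API to read these attributes. The JMX URL, port 40404 and member name below are placeholders:

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import com.gemstone.gemfire.management.CacheServerMXBean;

public class CacheServerStatsClient {
  public static void main(String[] args) throws Exception {
    // Placeholder manager endpoint and member identity.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName name = new ObjectName(
          "GemFire:service=CacheServer,port=40404,type=Member,member=server1");
      CacheServerMXBean bean =
          JMX.newMXBeanProxy(mbs, name, CacheServerMXBean.class);
      System.out.println("clients=" + bean.getCurrentClients()
          + " getRate=" + bean.getGetRequestRate()
          + " putRate=" + bean.getPutRequestRate());
    } finally {
      connector.close();
    }
  }
}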

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c01d5f4/geode-core/src/main/java/com/gemstone/gemfire/management/DiskStoreMXBean.java
----------------------------------------------------------------------
diff --cc geode-core/src/main/java/com/gemstone/gemfire/management/DiskStoreMXBean.java
index 0000000,110a791..8ac9c2c
mode 000000,100644..100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DiskStoreMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DiskStoreMXBean.java
@@@ -1,0 -1,212 +1,221 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management;
+ 
+ import com.gemstone.gemfire.cache.DiskStore;
++import com.gemstone.gemfire.management.internal.security.Resource;
++import com.gemstone.gemfire.management.internal.security.ResourceConstants;
++import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+ 
+ 
+ /**
+  * MBean that provides access to information and management functionality for a
+  * {@link DiskStore}.
+  * 
+  * @author rishim
+  * @since 7.0
+  * 
+  */
+ public interface DiskStoreMXBean {
+   
+   /**
+    * Returns the name of the DiskStore.
+    */
+   public String getName();
+ 
+   /**
+    * Returns whether disk files are to be automatically compacted.
+    * 
+    * @return True if disk files are automatically compacted, false otherwise
+    */
+   public boolean isAutoCompact();
+   
+   /**
+    * Returns the threshold at which an op-log may be compacted. Until it
+    * reaches this threshold the op-log will not be compacted. The threshold is
+    * a percentage in the range 0..100.
+    */
+   public int getCompactionThreshold();
+ 
+   /**
+    * Returns whether manual compaction of disk files is allowed.
+    * 
+    * @return True if manual compaction is allowed, false otherwise.
+    */
+   public boolean isForceCompactionAllowed();
+ 
+   /**
+    * Returns the maximum size (in megabytes) that a single op-log can grow to.
+    */
+   public long getMaxOpLogSize();
+ 
+   /**
+    * Returns the time (in milliseconds) that can elapse before unwritten
+    * data is saved to disk.
+    */
+   public long getTimeInterval();
+   
+   /**
+    * Returns the size of the write buffer that this DiskStore will use when
+    * writing data to disk.
+    */
+   public int getWriteBufferSize();
+ 
+   /**
+    * Returns the path of the directories to which the region's data will be
+    * written.
+    */
+   public String[] getDiskDirectories();
+ 
+   /**
+    * Returns the maximum number of operations that can be asynchronously
+    * queued for saving to disk. When this limit is reached operations
+    * will block until they can be put in the queue.
+    */
+   public int getQueueSize();
+ 
+   /**
+    * Returns the total number of bytes of space this DiskStore has used.
+    */
+   public long getTotalBytesOnDisk();
+ 
+   /**
+    * Returns the rate at which bytes are read from disk.
+    * 
+    * Each entry in a region has some overhead in terms of extra bytes when
+    * persisting data, so this rate won't match the number of bytes put into
+    * all regions; it is the rate of actual bytes the system is reading.
+    */
+   public float getDiskReadsRate();
+ 
+   /**
+    * Returns the rate at which bytes are written to disk.
+    * 
+    * Each entry in a region has some overhead in terms of extra bytes when
+    * persisting data, so this rate won't match the number of bytes put into
+    * all regions; it is the rate of actual bytes the system is persisting.
+    */
+   public float getDiskWritesRate();
+ 
+   /**
+    * Returns the disk reads average latency in nanoseconds. It depicts average
+    * time needed to read one byte of data from disk.
+    */
+   public long getDiskReadsAvgLatency();
+ 
+   /**
+    * Returns the disk writes average latency in nanoseconds. It depicts average
+    * time needed to write one byte of data to disk.
+    */
+   public long getDiskWritesAvgLatency();
+ 
+   /**
+    * Returns the flush time average latency.
+    */
+   public long getFlushTimeAvgLatency();
+ 
+   /**
+    * Returns the number of entries in the asynchronous queue waiting to be written
+    * to disk.
+    */
+   public int getTotalQueueSize();
+ 
+   /**
+    * Returns the number of backups currently in progress on this DiskStore.
+    */
+   public int getTotalBackupInProgress();
+   
+   /**
+    * Returns the number of backups of this DiskStore that have been completed.
+    */
+   public int getTotalBackupCompleted();
+ 
+   /**
+    * Returns the number of persistent regions currently being recovered from disk.
+    */
+   public int getTotalRecoveriesInProgress();
+ 
+   /**
+    * Requests the DiskStore to start writing to a new op-log. The old op-log will
+    * be asynchronously compacted if compaction is set to true. The new op-log will
+    * be created in the next available directory with free space. If there is no
+    * directory with free space available and compaction is set to false, a
+    * DiskAccessException saying that the disk is full will be thrown. If
+    * compaction is true, the application will wait until the other op-logs have
+    * been compacted and additional space is available.
+    */
++  @ResourceOperation(resource=Resource.DISKSTORE, operation=ResourceConstants.FORCE_ROLL)
+   public void forceRoll();
+ 
+   /**
+    * Requests the DiskStore to start compacting. The compaction is done even if
+    * automatic compaction is not configured. If the current, active op-log has
+    * had data written to it, and may be compacted, then an implicit  call to
+    * forceRoll will be made so that the active op-log can be compacted. This
+    * method will block until compaction finishes.
+    * 
+    * @return True if one or more op-logs were compacted or false to indicate
+    *         that no op-logs were ready to be compacted or that a compaction was
+    *         already in progress.
+    */
++  @ResourceOperation(resource=Resource.DISKSTORE, operation=ResourceConstants.FORCE_COMPACTION)
+   public boolean forceCompaction();
+   
+   /**
+    * Causes any data that is currently in the asynchronous queue to be written
+    * to disk. Does not return until the flush is complete.
+    */
++  @ResourceOperation(resource=Resource.DISKSTORE, operation=ResourceConstants.FLUSH_DISKSTORE)
+   public void flush();
+ 
+   /**
+    * Returns the warning threshold for disk usage as a percentage of the total 
+    * disk volume.
+    * 
+    * @return the warning percent
+    * @since 8.0
+    */
+   public float getDiskUsageWarningPercentage();
+ 
+   /**
+    * Returns the critical threshold for disk usage as a percentage of the total 
+    * disk volume.
+    * 
+    * @return the critical percent
+    * @since 8.0
+    */
+   public float getDiskUsageCriticalPercentage();
+   
+   /**
+    * Sets the value of the disk usage warning percentage.
+    * 
+    * @param warningPercent the warning percent
+    */
++  @ResourceOperation(resource=Resource.DISKSTORE, operation=ResourceConstants.SET_DISK_USAGE)
+   public void setDiskUsageWarningPercentage(float warningPercent);
+   
+   /**
+    * Sets the value of the disk usage critical percentage.
+    * 
+    * @param criticalPercent the critical percent
+    */
++  @ResourceOperation(resource=Resource.DISKSTORE, operation=ResourceConstants.SET_DISK_USAGE)
+   public void setDiskUsageCriticalPercentage(float criticalPercent);
++  
+ }
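
The @ResourceOperation annotations added to this interface (and to CacheServerMXBean above) tag each guarded management operation with a resource and an operation code from the internal security package. Below is a hedged sketch of how such annotations could be read reflectively; it assumes the annotation has runtime retention and elements named resource and operation, as its usage in the diff suggests, and the inspector class itself is hypothetical:

import java.lang.reflect.Method;

import com.gemstone.gemfire.management.DiskStoreMXBean;
import com.gemstone.gemfire.management.internal.security.ResourceOperation;

public class ResourceOperationInspector {

  // Print the resource/operation pair declared on each annotated MBean method.
  public static void describe(Class<?> mbeanInterface) {
    for (Method method : mbeanInterface.getMethods()) {
      ResourceOperation op = method.getAnnotation(ResourceOperation.class);
      if (op != null) {
        System.out.println(method.getName() + " -> resource=" + op.resource()
            + ", operation=" + op.operation());
      }
    }
  }

  public static void main(String[] args) {
    describe(DiskStoreMXBean.class);   // e.g. forceRoll -> DISKSTORE, FORCE_ROLL
  }
}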

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c01d5f4/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
----------------------------------------------------------------------
diff --cc geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
index 0000000,3ce1730..2a41f30
mode 000000,100644..100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/DistributedSystemMXBean.java
@@@ -1,0 -1,696 +1,696 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management;
+ 
+ import java.util.Map;
+ 
+ import javax.management.ObjectName;
+ 
+ import com.gemstone.gemfire.cache.DiskStore;
+ import com.gemstone.gemfire.cache.Region;
+ import com.gemstone.gemfire.distributed.DistributedMember;
+ import com.gemstone.gemfire.distributed.DistributedSystem;
+ import com.gemstone.gemfire.management.internal.security.Resource;
+ import com.gemstone.gemfire.management.internal.security.ResourceConstants;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
+ 
+ /**
+  * MBean that provides access to information and management operations for a
+  * {@link DistributedSystem}.
+  *
+  * It also provides an API for navigating the other MBeans exposed by the
+  * GemFire distributed system.
+  * 
+  * There will be one DistributedSystemMBean per GemFire cluster.
+  * 
+  * <p> ObjectName : GemFire:service=System,type=Distributed
+  * 
+  * <p> List of notifications emitted by this MBean.
+  * 
+  * <p>
+  * <table border="1">
+  * <tr>
+  * <th>Notification Type</th>
+  * <th>Notification Source</th>
+  * <th>Message</th>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cache.member.joined</td>
+  * <td>Name or ID of member who joined</td>
+  * <td>Member Joined &ltMember Name or ID&gt</td>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cache.member.departed</td>
+  * <td>Name or ID of member who departed</td>
+  * <td>Member Departed &ltMember Name or ID&gt has crashed = &lttrue/false&gt</td>
+  * </tr>
+  * <tr>
+  * <td>gemfire.distributedsystem.cache.member.suspect</td>
+  * <td>Name or ID of member who is suspected</td>
+  * <td>Member Suspected &lt;Member Name or ID&gt; By &lt;Who Suspected&gt;</td>
+  * </tr>
+  * <tr>
+  * <td>system.alert</td>
+  * <td>DistributedSystem("&ltDistributedSystem ID"&gt)</td>
+  * <td>Alert Message</td>
+  * </tr>
+  * </table>
+  *
+  * @author rishim
+  * @since 7.0
+  *
+  */
+ public interface DistributedSystemMXBean {
+ 
+   /**
+    * Returns the ID of this DistributedSystem.
+    *
+    * @return The DistributedSystem ID or -1 if not set.
+    */
+   public int getDistributedSystemId();
+ 
+   /**
+    * Returns the number of members in the distributed system.
+    */
+   public int getMemberCount();
+ 
+   /**
+    * Returns a list of names for all members.
+    */
+   @ResourceOperation(resource = Resource.DISTRIBUTED_SYSTEM)
+   public String[] listMembers();
+ 
+   /**
+    * Returns a list of names for locator members.
+    *
+    * @param onlyStandAloneLocators
+    *          if set to <code>true</code>, includes only stand alone locator
+    *          members.
+    * @return a list of names for locator members.
+    */
+   
+   public String[] listLocatorMembers(boolean onlyStandAloneLocators);
+ 
+   /**
+    * Returns a list of names for all groups.
+    */
+   public String[] listGroups();
+ 
+   /**
+    * Returns the number of locators in the distributed system.
+    */
+   public int getLocatorCount();
+ 
+   /**
+    * Returns a list of IDs for all locators.
+    */
+   public String[] listLocators(); //TODO - Abhishek Should be renamed to listLocatorDiscoveryConfigurations? Do we need something for mcast too?
+ 
+   /**
+    * Returns the number of disk stores in the distributed system.
+    */
+   public int getSystemDiskStoreCount();
+ 
+   /**
+    * Returns a map of all {@link DistributedMember}s and their {@link DiskStore}s.
+    */
+   public Map<String, String[]> listMemberDiskstore();
+ 
+   
+   /**
+    *  @return A map of all {@link DistributedMember}s and their HDFSStores.
+    */
+   
+   public Map<String, String[]> listMemberHDFSStore();
+   
+   
+   /**
+    * Returns a list of IDs for all gateway senders.
+    */
+   public String[] listGatewaySenders();
+ 
+   /**
+    * Returns a list of IDs for all gateway receivers.
+    */
+   public String[] listGatewayReceivers();
+ 
+   /**
+    * Returns the minimum level set for alerts to be delivered to listeners.
+    */
+   public String getAlertLevel();
+ 
+   /**
+    * Sets the minimum level for alerts to be delivered to listeners.
+    *
+    * @param alertLevel
+    *          Minimum level for alerts to be delivered.
+    *          Must be one of: WARNING, ERROR, SEVERE or NONE.
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.CHANGE_ALERT_LEVEL_DS) 
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.CHANGE_ALERT_LEVEL) 
+   public void changeAlertLevel(String alertLevel) throws Exception;
+ 
+   /**
+    * Returns the total available heap (in megabytes) across all distributed
+    * members.
+    */
+   public long getTotalHeapSize();
+ 
+   /**
+    * Returns the total number of entries in all regions.
+    */
+   public long getTotalRegionEntryCount();
+ 
+   /**
+    * Returns the number of {@link Region}s.
+    */
+ 
+   public int getTotalRegionCount();
+ 
+   /**
+    * Returns the number of times that a cache miss occurred for all regions.
+    */
+   public int getTotalMissCount();
+ 
+   /**
+    * Returns the number of times that a hit occurred for all regions.
+    */
+   public int getTotalHitCount();
+ 
+   /**
+    * Returns the number of connected clients.
+    */
+   public int getNumClients();
+ 
+ 
+   /**
+    * Returns the average number of disk reads per second across all distributed
+    * members.
+    */
+   public float getDiskReadsRate();
+ 
+   /**
+    * Returns the average number of disk writes per second across all
+    * distributed members.
+    */
+   public float getDiskWritesRate();
+ 
+   /**
+    * Returns the average disk flush latency time.
+    */
+   public long getDiskFlushAvgLatency();
+ 
+   /**
+    * Returns the number of backups currently in progress for all disk stores.
+    */
+   public int getTotalBackupInProgress();
+ 
+   /**
+    * Returns the number of initial images in progress.
+    */
+   public int getNumInitialImagesInProgress();
+ 
+   /**
+    * Returns the number of active (currently executing) CQs for all cache servers.
+    */
+   public long getActiveCQCount();
+ 
+   /**
+    * Returns the average number of queries per second across all
+    * distributed members.
+    */
+   public float getQueryRequestRate();
+ 
+   /**
+    * Performs a backup on all members.
+    *
+    * @param targetDirPath
+    *          Directory to which backup files will be written
+    * @param baselineDirPath
+    *          path of the directory for baseline backup.
+    * @return The results of the backup request.
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.BACKUP_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.BACKUP_MEMBERS)
+   public DiskBackupStatus backupAllMembers(String targetDirPath, String baselineDirPath)
+       throws Exception;
+ 
+   /**
+    * Returns the configuration information for a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @return The configuration information for a member.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public GemFireProperties fetchMemberConfiguration(String member)
+       throws Exception;
+ 
+   /**
+    * Returns the total time (in seconds) since a distributed member was started.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @return The total time (in seconds) since a member was started.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public long fetchMemberUpTime(String member) throws Exception;
+ 
+   /**
+    * Returns a list of names for all cache servers which are able to serve requests from GemFire clients.
+    * 
+    */
+   public String[] listCacheServers();
+   
+   
+   /**
+    * Returns a list of names for all servers, where a server is
+    * any long-running GemFire process that was started with the "start server" command from GFSH.
+    */
+   public String[] listServers();
+ 
+   /**
+    * Returns JVM metrics for a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public JVMMetrics showJVMMetrics(String member) throws Exception;
+ 
+   /**
+    * Returns operating system metrics for a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public OSMetrics showOSMetrics(String member) throws Exception;
+ 
+   /**
+    * Returns network metrics for a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public NetworkMetrics showNetworkMetric(String member) throws Exception;
+ 
+   /**
+    * Returns disk metrics for a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @throws Exception
+    *           for an invalid member ID.
+    */
+   public DiskMetrics showDiskMetrics(String member) throws Exception;
+ 
+   /**
+    * Shuts down all members of a distributed system except for the managing member.
+    *
+    * @return List of names of all distributed members that were shutdown.
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.SHUTDOWN_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.SHUTDOWN)
+   public String[] shutDownAllMembers() throws Exception;
+ 
+   /**
+    * Returns a list of names for all regions.
+    */
+   public String[] listRegions();
+ 
+   /**
+    * Returns a list of full paths for all regions.
+    */
+   public String[] listAllRegionPaths();
+ 
+   /**
+    * Removes a disk store from the distributed system.
+    *
+    * @param diskStoreId
+    *          UUID of the disk store to remove
+    * @return True if the request is successful, false otherwise.
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.REMOVE_DISKSTORE_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.REVOKE_MISSING_DISKSTORE)
+   public boolean revokeMissingDiskStores(String diskStoreId)
+       throws Exception;
+ 
+   /**
+    * Returns a list of details for disk stores which have been determined to be
+    * unavailable during the recovery of region.
+    */
+   public PersistentMemberDetails[] listMissingDiskStores();
+ 
+   /**
+    * Returns the object name for a {@link MemberMXBean} used to access
+    * this distributed member.
+    */
+   public ObjectName getMemberObjectName();
+ 
+   /**
+    * Returns the object name for a {@link ManagerMXBean} used to access the
+    * management service running on this distributed member.
+    */
+   public ObjectName getManagerObjectName();
+ 
+   /**
+    * Returns a list of object names for the {@link MemberMXBean}s used to
+    * access all distributed members.
+    */
+   public ObjectName[] listMemberObjectNames();
+ 
+   /**
+    * Returns the object name for a {@link MemberMXBean} used to access
+    * a distributed member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    */
+   public ObjectName fetchMemberObjectName(String member) throws Exception;
+ 
+   /**
+    * Returns a list of object names for the {@link RegionMXBean}s used to
+    * access all regions on a distributed member.
+    *
+    * @param memberMBeanName
+    *          ObjectName of the member.
+    */
+   public ObjectName[] fetchRegionObjectNames(ObjectName memberMBeanName)
+       throws Exception;
+ 
+   /**
+    * Returns a list of object names for the {@link DistributedRegionMXBean}s
+    * used to access all distributed regions.
+    *
+    * @return An array of object names or an empty array if no distributed regions
+    *         are found.
+    */
+   public ObjectName[] listDistributedRegionObjectNames();
+ 
+   /**
+    * Returns the object name for a {@link DistributedRegionMXBean} used to
+    * access a distributed region.
+    *
+    * @param regionPath
+    *          Full path of the region.
+    */
+   public ObjectName fetchDistributedRegionObjectName(String regionPath)
+       throws Exception;
+ 
+   /**
+    * Returns the object name for a {@link RegionMXBean} used to access
+    * a region.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @param regionPath
+    *          Full path of the region.
+    */
+   public ObjectName fetchRegionObjectName(String member, String regionPath)
+       throws Exception;
+ 
+   /**
+    * Returns the object name for a {@link GatewaySenderMXBean} used to access
+    * a gateway sender.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @param senderId
+    *          ID of a gateway sender.
+    */
+   public ObjectName fetchGatewaySenderObjectName(String member,
+       String senderId) throws Exception;
+ 
+   /**
+    * Returns the object name for a {@link GatewayReceiverMXBean} used to access
+    * a gateway receiver.
+    *
+    * @param member
+    *          Name or ID of the member.
+    */
+   public ObjectName fetchGatewayReceiverObjectName(String member)
+       throws Exception;
+ 
+   /**
+    * Returns a list of object names for the {@link GatewaySenderMXBean}s
+    * used to access all gateway senders.
+    *
+    * @return An array of object names or an empty array if no gateway senders
+    *         are found.
+    */
+   public ObjectName[] listGatewaySenderObjectNames();
+ 
+   /**
+    * Returns a list of object names for the {@link GatewaySenderMXBean}s
+    * used to access all gateway senders on a member.
+    *
+    * @param member
+    *          Name or ID of the member.
+    */
+   public ObjectName[] listGatewaySenderObjectNames(String member)
+       throws Exception;
+ 
+   /**
+    * Returns a list of object names for the {@link GatewayReceiverMXBean}s
+    * used to access all gateway senders.
+    *
+    * @return An array of object names or an empty array if no gateway receivers
+    *         are found.
+    */
+   public ObjectName[] listGatewayReceiverObjectNames();
+ 
+   /**
+    * Returns the object name for a {@link DistributedLockServiceMXBean} used to
+    * access a distributed lock service.
+    *
+    * @param lockServiceName
+    *          Name of the lock service.
+    */
+   public ObjectName fetchDistributedLockServiceObjectName(String lockServiceName)
+       throws Exception;
+ 
+   /**
+    * Returns the object name for a {@link LockServiceMXBean} used to
+    * access a lock service.
+    *
+    * @param member
+    *          Name or Id of the member.
+    * @param lockService
+    *          Name of the lock service.
+    */
+   public ObjectName fetchLockServiceObjectName(String member,
+       String lockService) throws Exception;
+ 
+   /**
+    * Returns object name of a {@link DiskStoreMXBean} for a given name and member
+    *
+    * @param member
+    *          name or id of the member
+    * @param diskStoreName
+    *          name of the disk store
+    * @return an ObjectName
+    * @throws Exception
+    */
+   public ObjectName fetchDiskStoreObjectName(String member,
+       String diskStoreName) throws Exception;
+ 
+   /**
+    * Returns the object name for a {@link CacheServerMXBean} used to access
+    * a cache server.
+    *
+    * @param member
+    *          Name or ID of the member.
+    * @param port
+    *          Port of the server.
+    */
+   public ObjectName fetchCacheServerObjectName(String member, int port)
+       throws Exception;
+ 
+   /**
+    * Returns a list of object names for the {@link CacheServerMXBean}s
+    * used to access all cache servers.
+    */
+   public ObjectName[] listCacheServerObjectNames();
+ 
+   /**
+    * Returns the number of map-reduce jobs currently running on all members
+    * in the distributed system.
+    */
+   public int getNumRunningFunctions();
+ 
+   /**
+    * Returns the number of CQs registered on all members.
+    */
+   public long getRegisteredCQCount();
+ 
+   /**
+    * Returns the number of bytes used on all disks.
+    */
+   public long getTotalDiskUsage();
+ 
+   /**
+    * Returns the total heap used on all members.
+    */
+   public long getUsedHeapSize();
+ 
+   /**
+    * Returns the average number of reads per second for all members.
+    */
+   public float getAverageReads();
+ 
+   /**
+    * Returns the average writes per second, including both put and putAll operations,
+    * for all members.
+    */
+   public float getAverageWrites();
+ 
+   /**
+    * Returns the number of subscriptions for all members.
+    */
+   public int getNumSubscriptions();
+ 
+ 
+   /**
+    * Returns the number of garbage collection operations for all members.
+    */
+   public long getGarbageCollectionCount();
+ 
+   /**
+    * Returns a map of remote distributed system IDs and the current connection
+    * status for each.
+    */
+   public Map<String, Boolean> viewRemoteClusterStatus();
+ 
+   /**
+    * Returns the number of JVM pauses (which may or may not include full garbage
+    * collection pauses) detected by GemFire.
+    */
+   public long getJVMPauses();
+ 
+   /**
+    * Queries data from the GemFire system and returns a JSON formatted String
+    * containing the data and its type. Each type and value of data forms an
+    * array, with the type preceding the value.
+    * 
+    * e.g. {"result":[["java.lang.String","v"],["java.lang.String","b"]]}
+    * 
+    * GemFire PDXInstances are also supported. The type of a PDXInstance is
+    * reported as PDXInstance and its value is a set of key-value pairs. There is
+    * no marker identifying the "IdentityField" of the PDXInstance.
+    * 
+    * If the query is executed on the cluster and no member list is given as
+    * input, the first key of the JSON string will be "result", followed by the
+    * result set in JSON format.
+    * 
+    * If the query is executed on one or more specific members, the returned
+    * string will have an array of "member" and "result" keys.
+    * 
+    * For a query on a replicated region, if no member input is given, data from
+    * a random node hosting the region is shown. For partitioned regions, data
+    * from all nodes is collected and shown. Use caution when querying a
+    * partitioned region that is large and hosted on many nodes.
+    * 
+    * Join queries on partitioned regions require that one member be provided as
+    * input. If the member does not host the regions, or the regions are not
+    * co-located, an error string is returned.
+    * 
+    * @param queryString
+    *          GemFire supported OQL query
+    * @param members
+    *          comma-separated list of members on which the query is to be
+    *          executed. This input is required only for join queries on
+    *          partitioned regions; if no member list is provided, the query
+    *          runs on the whole cluster.
+    * @param limit
+    *          result set limit. If not set, or 0 is passed, the default limit
+    *          of 1000 is used.
+    * @return a JSON formatted string containing data and its type
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERYDATA_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERY)
+   public String queryData(String queryString, String members, int limit) throws Exception;
+   
+   /**
+    * 
+    * Functionality is the same as the queryData() method; the only difference
+    * is that the resulting JSON string is compressed with Java GZIP using UTF-8
+    * encoding. Any client application can decompress the byte[] using GZIP.
+    * 
+    * e.g.
+    * 
+    * GZIPInputStream gis = new GZIPInputStream(new ByteArrayInputStream(bytes));
+    * BufferedReader bf = new BufferedReader(new InputStreamReader(gis, "UTF-8"));
+    * StringBuilder outStr = new StringBuilder();
+    * String line;
+    * while ((line = bf.readLine()) != null) {
+    *   outStr.append(line);
+    * }
+    * 
+    * @param queryString
+    *          GemFire supported OQL query
+    * @param members
+    *          comma-separated list of members on which the query is to be
+    *          executed. This input is required only for join queries on
+    *          partitioned regions; if no member list is provided, the query
+    *          runs on the whole cluster.
+    * @param limit
+    *          result set limit. If not set, or 0 is passed, the default limit
+    *          of 1000 is used.
+    * @return a byte[] which is a compressed JSON string.
+    */
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERYDATA_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERY)
+   public byte[] queryDataForCompressedResult(String queryString, String members, int limit) throws Exception;
+   
+   
+   /**
+    * Returns the number of committed transactions across all members. This is a
+    * point-in-time value, i.e. the number of transactions committed at the time
+    * this value is read.
+    */
+   public int getTransactionCommitted();
+ 
+   /**
+    * Returns the number of transactions that were rolled back across all
+    * members. This is a point-in-time value, i.e. the number of transactions
+    * rolled back at the time this value is read.
+    */
+   public int getTransactionRolledBack();
+   
+   /**
+    * Returns the number of rows the DistributedSystemMXBean.queryData()
+    * operation will return. The default is 1000. Users can modify this to
+    * control the number of rows shown in Pulse, as the Pulse DataBrowser
+    * internally uses DistributedSystemMXBean.queryData().
+    */
+   public int getQueryResultSetLimit();
+ 
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERYDATA_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERY)
+   public void setQueryResultSetLimit(int queryResultSetLimit);
+ 
+   /**
+    * Returns the number of elements of a collection to be shown by the
+    * queryData operation when query results contain collections such as Map,
+    * List, etc.
+    */
+   public int getQueryCollectionsDepth();
+ 
 -  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERYDATA_DS)
++  @ResourceOperation( resource=Resource.DISTRIBUTED_SYSTEM, operation=ResourceConstants.QUERY)
+   public void setQueryCollectionsDepth(int queryCollectionsDepth);
+ }
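
For context on how the operations above are reached from a client, here is an illustrative sketch (not part of this commit) that obtains a DistributedSystemMXBean proxy via the ObjectName documented in the interface Javadoc and decompresses the result of queryDataForCompressedResult() following the GZIP recipe in its Javadoc. The JMX endpoint, region name, and query below are placeholder values.

    // Client-side sketch: connect to a JMX manager, proxy the distributed system
    // MXBean, run a query, and decompress the GZIP'd JSON result.
    import java.io.BufferedReader;
    import java.io.ByteArrayInputStream;
    import java.io.InputStreamReader;
    import java.util.zip.GZIPInputStream;

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    import com.gemstone.gemfire.management.DistributedSystemMXBean;

    public class QueryDataClient {

      public static void main(String[] args) throws Exception {
        // Placeholder endpoint for a JMX manager; adjust host/port for your cluster.
        JMXServiceURL url =
            new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection connection = connector.getMBeanServerConnection();
          // ObjectName documented in the DistributedSystemMXBean Javadoc.
          ObjectName name = new ObjectName("GemFire:service=System,type=Distributed");
          DistributedSystemMXBean bean =
              JMX.newMXBeanProxy(connection, name, DistributedSystemMXBean.class);

          // Compressed variant: same JSON payload, GZIP'd with UTF-8 encoding.
          // Passing null for members runs the query on the whole cluster.
          byte[] compressed =
              bean.queryDataForCompressedResult("select * from /exampleRegion", null, 100);

          GZIPInputStream gis = new GZIPInputStream(new ByteArrayInputStream(compressed));
          BufferedReader reader = new BufferedReader(new InputStreamReader(gis, "UTF-8"));
          StringBuilder json = new StringBuilder();
          String line;
          while ((line = reader.readLine()) != null) {
            json.append(line);
          }
          System.out.println(json);
        } finally {
          connector.close();
        }
      }
    }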

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5c01d5f4/geode-core/src/main/java/com/gemstone/gemfire/management/GatewayReceiverMXBean.java
----------------------------------------------------------------------
diff --cc geode-core/src/main/java/com/gemstone/gemfire/management/GatewayReceiverMXBean.java
index 0000000,e3c483f..359e92e
mode 000000,100644..100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/management/GatewayReceiverMXBean.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/management/GatewayReceiverMXBean.java
@@@ -1,0 -1,211 +1,216 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management;
+ 
+ import com.gemstone.gemfire.cache.wan.GatewayReceiver;
++import com.gemstone.gemfire.management.internal.security.Resource;
++import com.gemstone.gemfire.management.internal.security.ResourceConstants;
++import com.gemstone.gemfire.management.internal.security.ResourceOperation;
+ 
+ /**
+  * MBean that provides access to information and management functionality for a
+  * {@link GatewayReceiver}.
+  * 
+  * @author rishim
+  * @since 7.0
+  * 
+  */
+ 
+ public interface GatewayReceiverMXBean {
+ 
+   /**
+    * Returns the port the receiver is listening on.
+    */
+   public int getPort();
+ 
+   /**
+    * Returns the configured buffer size of the socket connection.
+    */
+   public int getSocketBufferSize();
+ 
+   /**
+    * Returns the bind address on the host.
+    */
+   public String getBindAddress();
+ 
+   /**
+    * Returns the maximum amount of time between client pings.
+    */
+   public int getMaximumTimeBetweenPings();
+ 
+   /**
+    * Returns whether the receiver is in running state.
+    * 
+    * @return True if the receiver is in a running state, false otherwise.
+    */
+   public boolean isRunning();
+ 
+   /**
+    * Returns the instantaneous rate of events received.
+    */
+   public float getEventsReceivedRate();
+ 
+   /**
+    * Returns the rate of create requests received.
+    */
+   public float getCreateRequestsRate();
+ 
+   /**
+    * Returns the rate of update requests received.
+    */
+   public float getUpdateRequestsRate();
+ 
+   /**
+    * Returns the rate of destroy requests received.
+    */
+   public float getDestroyRequestsRate();
+ 
+   /**
+    * Returns the number of duplicate batches which have been received.
+    */
+   public int getDuplicateBatchesReceived();
+ 
+   /**
+    * Returns the number of batches which have been received out of order.
+    */
+   public int getOutoforderBatchesReceived();
+ 
+   /**
+    * Starts the gateway receiver.
+    */
++  @ResourceOperation(resource=Resource.GATEWAY_RECEIVER, operation=ResourceConstants.START_GW_RECEIVER)
+   public void start() throws Exception;
+ 
+   /**
+    * Stops the gateway receiver.
+    */
++  @ResourceOperation(resource=Resource.GATEWAY_RECEIVER, operation=ResourceConstants.STOP_GW_RECEIVER)
+   public void stop() throws Exception;
+ 
+   /**
+    * Returns the configured start port.
+    */
+   public int getStartPort();
+ 
+   /**
+    * Returns the configured end port.
+    */
+   public int getEndPort();
+   
+   /**
+    * Returns a list of names for the transport filters in use.
+    */
+   public String[] getGatewayTransportFilters();
+   
+   /**
+    * Returns the number of sockets accepted and used for client to server messaging.
+    */
+   public int getClientConnectionCount();
+   
+   /**
+    * Returns the number of client virtual machines connected and acting as a gateway.
+    */
+   public int getNumGateways();
+ 
+   /**
+    * Returns the average get request latency.
+    */
+   public long getGetRequestAvgLatency();
+ 
+   /**
+    * Returns the average put request latency.
+    */
+   public long getPutRequestAvgLatency();
+ 
+   /**
+    * Returns the total number of client connections that timed out and were
+    * closed.
+    */
+   public int getTotalConnectionsTimedOut();
+ 
+   /**
+    * Returns the total number of client connection requests that failed.
+    */
+   public int getTotalFailedConnectionAttempts();
+ 
+   /**
+    * Returns the current number of connections waiting for a thread to start
+    * processing their message.
+    */
+   public int getThreadQueueSize();
+ 
+   /**
+    * Returns the current number of threads handling a client connection.
+    */
+   public int getConnectionThreads();
+ 
+   /**
+    * Returns the load from client to server connections as reported by the load
+    * probe installed in this server.
+    */
+   public double getConnectionLoad();
+ 
+   /**
+    * Returns the estimate of how much load is added for each new connection as
+    * reported by the load probe installed in this server.
+    */
+   public double getLoadPerConnection();
+ 
+   /**
+    * Returns the load from queues as reported by the load probe installed in
+    * this server.
+    */
+   public double getQueueLoad();
+ 
+   /**
+    * Returns the estimate of how much load is added for each new queue as
+    * reported by the load probe installed in this server.
+    */
+   public double getLoadPerQueue();
+ 
+   /**
+    * Returns the rate of get requests.
+    */
+   public float getGetRequestRate();
+ 
+   /**
+    * Returns the rate of put requests.
+    */
+   public float getPutRequestRate();
+   
+   /**
+    * Returns the total number of bytes sent to clients.
+    */
+   public long getTotalSentBytes();
+ 
+   /**
+    * Returns the total number of bytes received from clients.
+    */
+   public long getTotalReceivedBytes();
+ 
+   /**
+    * Returns a list of the host and port information for gateway senders connected to
+    * this gateway receiver.
+    */
+   public String[] getConnectedGatewaySenders();  
+   
+   /**
+    * Returns the average batch processing time (in milliseconds).
+    */
+   public long getAverageBatchProcessingTime();
+ 
+ }
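
Similarly, a brief sketch (illustrative only, not part of this commit) of reaching the start()/stop() operations that the new annotations guard: navigate from the DistributedSystemMXBean to a member's GatewayReceiverMXBean via fetchGatewayReceiverObjectName(), reusing an already-established MBeanServerConnection as in the previous example. The member name is a placeholder.

    // Navigates from the distributed system MBean to a member's gateway receiver
    // MBean and restarts it; stop()/start() are the annotated operations above.
    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;

    import com.gemstone.gemfire.management.DistributedSystemMXBean;
    import com.gemstone.gemfire.management.GatewayReceiverMXBean;

    public class GatewayReceiverControl {

      public static void restartReceiver(MBeanServerConnection connection, String member)
          throws Exception {
        ObjectName systemName = new ObjectName("GemFire:service=System,type=Distributed");
        DistributedSystemMXBean system =
            JMX.newMXBeanProxy(connection, systemName, DistributedSystemMXBean.class);

        // Look up the receiver MBean hosted on the given member.
        ObjectName receiverName = system.fetchGatewayReceiverObjectName(member);
        GatewayReceiverMXBean receiver =
            JMX.newMXBeanProxy(connection, receiverName, GatewayReceiverMXBean.class);

        if (receiver.isRunning()) {
          receiver.stop();
        }
        receiver.start();
        System.out.println("Receiver listening on port " + receiver.getPort());
      }
    }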

