[ 
https://issues.apache.org/jira/browse/RANGER-3513?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

ruiliang updated RANGER-3513:
-----------------------------
    Description: 
After adding a new policy, all Ranger admin requests take 10s+ to respond. A jstack dump shows several threads blocked on a lock. Could it be that the lastKnownVersion sent in the external plugin requests never changes?
 * top shows thread 61905 consuming 99.3% CPU

{code:java}
KiB Swap:        0 total,        0 free,        0 used. 57020896 avail Mem 
  PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND      
                                                                                
                                                                            
61905 root      20   0 20.748g 4.689g  23760 R 99.3  7.6   4:32.52 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 3500 root      20   0 20.748g 4.689g  23760 S  8.3  7.6   0:00.31 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
45584 root      20   0 20.748g 4.689g  23760 S  6.3  7.6   0:02.76 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
62387 root      20   0 20.748g 4.689g  23760 S  6.3  7.6   0:02.32 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 7230 root      20   0 20.748g 4.689g  23760 S  5.3  7.6   0:00.42 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 3412 root      20   0 20.748g 4.689g  23760 S  5.0  7.6   0:00.53 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
45616 root      20   0 20.748g 4.689g  23760 S  4.3  7.6   0:03.50 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 3498 root      20   0 20.748g 4.689g  23760 S  4.3  7.6   0:00.92 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 7238 root      20   0 20.748g 4.689g  23760 S  4.3  7.6   0:00.28 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 7096 root      20   0 20.748g 4.689g  23760 S  1.3  7.6   0:00.05 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52641 root      20   0 20.748g 4.689g  23760 S  0.3  7.6   1:33.20 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
 7229 root      20   0 20.748g 4.689g  23760 S  0.3  7.6   0:00.30 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52032 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   0:00.00 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52034 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   0:00.84 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52035 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:40.21 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52036 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:37.32 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52037 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:36.96 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52038 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:37.96 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52039 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:38.80 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52040 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:38.05 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52041 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:37.59 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52042 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:38.29 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52043 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:37.88 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52044 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:39.19 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
52045 root      20   0 20.748g 4.689g  23760 S  0.0  7.6   7:38.46 java 
-Dproc_rangeradmin -XX:MetaspaceSize=512M -Xmx4024m -Xms4024m 
-Duser.timezone=UTC -Dzookeeper.sasl.client.username=zookeeper -Duser=root 
-Dhostname=fs-hiido-range+ 
root@fs-hiido-ranger-21-117-150:/home/liangrui# printf "%x\n"  61905
f1d1
root@fs-hiido-ranger-21-117-150:/home/liangrui# 
root@fs-hiido-ranger-21-117-150:/home/liangrui# jstack  52032 | grep -20 f1d1
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at 
org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)
        at java.lang.Thread.run(Thread.java:748)
"http-bio-6080-exec-9692" #9852 daemon prio=5 os_prio=0 tid=0x00007f0bf003a800 
nid=0xf3b2 waiting on condition [0x00007f0c287cc000]
   java.lang.Thread.State: TIMED_WAITING (parking)
        at sun.misc.Unsafe.park(Native Method)
        - parking to wait for  <0x00000006c72df908> (a 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject)
        at 
java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
        at 
java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:467)
        at org.apache.tomcat.util.threads.TaskQueue.poll(TaskQueue.java:86)
        at org.apache.tomcat.util.threads.TaskQueue.poll(TaskQueue.java:32)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1073)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at 
org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)
        at java.lang.Thread.run(Thread.java:748)
"Thread-114" #9851 daemon prio=5 os_prio=0 tid=0x00007f0a14006800 nid=0xf1d1 
runnable [0x00007f0c295d7000]
   java.lang.Thread.State: RUNNABLE
        at java.lang.reflect.Field.get(Field.java:393)
        at 
org.eclipse.persistence.internal.descriptors.InstanceVariableAttributeAccessor.getAttributeValueFromObject(InstanceVariableAttributeAccessor.java:76)
        at 
org.eclipse.persistence.mappings.DatabaseMapping.getAttributeValueFromObject(DatabaseMapping.java:630)
        at 
org.eclipse.persistence.mappings.foundation.AbstractDirectMapping.valueFromObject(AbstractDirectMapping.java:1176)
        at 
org.eclipse.persistence.internal.descriptors.ObjectBuilder.extractPrimaryKeyFromObject(ObjectBuilder.java:3011)
        at 
org.eclipse.persistence.internal.descriptors.ObjectBuilder.createObjectChangeSet(ObjectBuilder.java:2885)
        at 
org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy.createObjectChangeSetThroughComparison(DeferredChangeDetectionPolicy.java:155)
        at 
org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy.createObjectChangeSet(DeferredChangeDetectionPolicy.java:146)
        at 
org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy.calculateChanges(DeferredChangeDetectionPolicy.java:91)
        at 
org.eclipse.persistence.descriptors.changetracking.DeferredChangeDetectionPolicy.calculateChangesForExistingObject(DeferredChangeDetectionPolicy.java:56)
        at 
org.eclipse.persistence.internal.sessions.UnitOfWorkImpl.calculateChanges(UnitOfWorkImpl.java:664)
        at 
org.eclipse.persistence.internal.sessions.RepeatableWriteUnitOfWork.writeChanges(RepeatableWriteUnitOfWork.java:438)
        at 
org.eclipse.persistence.internal.jpa.EntityManagerImpl.flush(EntityManagerImpl.java:863)
        at 
org.eclipse.persistence.internal.jpa.QueryImpl.performPreQueryFlush(QueryImpl.java:963)
        at 
org.eclipse.persistence.internal.jpa.QueryImpl.executeReadQuery(QueryImpl.java:207)
        at 
org.eclipse.persistence.internal.jpa.QueryImpl.getResultList(QueryImpl.java:469)
        at 
org.apache.ranger.db.XXPolicyResourceMapDao.findByPolicyId(XXPolicyResourceMapDao.java:56)
        at 
org.apache.ranger.biz.RangerPolicyRetriever$RetrieverContext.<init>(RangerPolicyRetriever.java:552)
        at 
org.apache.ranger.biz.RangerPolicyRetriever$RetrieverContext.getAllPoliciesBySecondary(RangerPolicyRetriever.java:671)
     {code}

 * Many request logs like the following, from a running Spark application:

{code:java}
10.12.11.16 - - [11/Nov/2021:17:12:28 +0800] "GET 
/service/plugins/secure/policies/download/spark_offline?lastKnownVersion=29397&lastActivationTime=1636621258729&pluginId=sparkSql%40fs-hiido-dn-12-11-16.hiido.host.yydevops.com-spark_offline&clusterName=
 HTTP/1.1" 304 - "-" "Java/1.8.0_201" {code}
The actual service definition at that moment:
{code:java}
{
  "configs": {
    "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
    "jdbc.url": "jdbc:hive2://10.21.112.3:30000",
    "password": "*****",
    "policy.download.auth.users": "*",
    "policy.grantrevoke.auth.users": "*",
    "tag.download.auth.users": "*",
    "username": "hdfs"
  },
  "createTime": 1573165348000,
  "createdBy": "Admin",
  "description": "spark_offline cluster",
  "guid": "ddb5305b-d145-44f7-af09-77b41d28959f",
  "id": 17,
  "isEnabled": true,
  "name": "spark_offline",
  "policyUpdateTime": 1636621544000,
  "policyVersion": 29412,
  "tagUpdateTime": 1573194148000,
  "tagVersion": 1,
  "type": "hive",
  "updateTime": 1573432854000,
  "updatedBy": "Admin",
  "version": 13
}
{code}
The request still carries lastKnownVersion=29397 while the service's policyVersion is already 29412, yet the server answers with 304. When can this happen?
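A minimal, self-contained sketch (not the actual Ranger code; the class and field names below are made up) of a pattern that would explain this: if the admin answers the download request from an in-memory cache and only compares the plugin's lastKnownVersion against the cached policyVersion, then a cache entry stuck at 29397 yields a 304 even though the database is already at 29412.
{code:java}
// Hypothetical illustration only -- NOT Ranger's implementation.
import java.util.concurrent.atomic.AtomicLong;

public class PolicyDownloadSketch {
    // assumed cached version; in this incident it would be stuck at 29397
    private final AtomicLong cachedPolicyVersion = new AtomicLong(29397L);

    /** Returns -1 to signal "304 Not Modified", otherwise the version to ship. */
    public long policiesIfUpdated(long lastKnownVersion) {
        long cached = cachedPolicyVersion.get();   // the database (at 29412) is never consulted here
        if (cached <= lastKnownVersion) {
            return -1L;                            // plugin already has this version -> 304
        }
        return cached;                             // newer cached policies would be shipped
    }

    public static void main(String[] args) {
        PolicyDownloadSketch sketch = new PolicyDownloadSketch();
        // Plugin asks with lastKnownVersion=29397 while the DB is at 29412:
        System.out.println(sketch.policiesIfUpdated(29397L));  // prints -1 => 304, the stale cache wins
    }
}
{code}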

After the Ranger admin service is restarted it recovers, and the request log then shows lastKnownVersion=29412.

Where does this lastKnownVersion come from? Is the in-memory policy cache not refreshed yet?

The relevant code, RangerServicePoliciesCache.getLatestOrCached (Ranger 1.2):
{code:java}
boolean getLatestOrCached(String serviceName, ServiceStore serviceStore) throws Exception {
    boolean ret = false;

    try {
        ret = lock.tryLock(waitTimeInSeconds, TimeUnit.SECONDS);
        if (ret) {
            getLatest(serviceName, serviceStore);
        }
    } catch (InterruptedException exception) {
        LOG.error("getLatestOrCached:lock got interrupted..", exception);
    } finally {
        if (ret) {
            lock.unlock();
        }
    }

    return ret;
}
{code}
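A standalone sketch of why every request then stalls for roughly waitTimeInSeconds: one thread takes the lock and spends a long time in getLatest() (the EclipseLink/DB walk visible in the jstack above), while every other request thread blocks in tryLock for the full timeout, gives up, and is served the stale cached copy. The 10-second timeout and the 30-second reload below are assumed values for illustration only.
{code:java}
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class GetLatestOrCachedSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private final long waitTimeInSeconds = 10;     // same order as the observed 10s+ latency

    boolean getLatestOrCached() throws InterruptedException {
        boolean ret = false;
        try {
            ret = lock.tryLock(waitTimeInSeconds, TimeUnit.SECONDS);
            if (ret) {
                getLatest();                       // slow: stands in for the long policy reload
            }
        } finally {
            if (ret) {
                lock.unlock();
            }
        }
        return ret;                                // false => caller falls back to the stale cache
    }

    private void getLatest() throws InterruptedException {
        Thread.sleep(30_000);                      // stand-in for the slow DB/EclipseLink walk
    }

    public static void main(String[] args) throws Exception {
        GetLatestOrCachedSketch s = new GetLatestOrCachedSketch();
        new Thread(() -> {
            try { s.getLatestOrCached(); } catch (InterruptedException ignored) { }
        }).start();
        Thread.sleep(100);                         // let the first thread acquire the lock
        long t0 = System.nanoTime();
        boolean refreshed = s.getLatestOrCached(); // second caller: waits ~10s, then gives up
        System.out.printf("refreshed=%b after %ds%n",
                refreshed, (System.nanoTime() - t0) / 1_000_000_000L);
    }
}
{code}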
 

If the policy name contains an asterisk (*), the deadlock is triggered and all requests wait 10s+. Example of such a policy name that was added:

 

"name":"udf-resource-policy-*-com.xx.ent.hive.udf.DecryptUDF_presto.decrypt-2147454847",

 
 * The full jstack output in question is attached:

[^ranger_1_1115.jstack]


> locks getLatestOrCached(RangerServicePoliciesCache.java:198) (ranger 1.2)
> -------------------------------------------------------------------------
>
>                 Key: RANGER-3513
>                 URL: https://issues.apache.org/jira/browse/RANGER-3513
>             Project: Ranger
>          Issue Type: Bug
>          Components: admin
>    Affects Versions: 1.2.0
>            Reporter: ruiliang
>            Priority: Major
>         Attachments: ranger_1_1115.jstack
>
>   Original Estimate: 12h
>  Remaining Estimate: 12h



--
This message was sent by Atlassian Jira
(v8.20.1#820001)
