pinocchio 3.7.0
A fast and flexible implementation of Rigid Body Dynamics algorithms and their analytical derivatives
 
continuous Namespace Reference

Classes

class  PolicyNetwork
 
class  QValueNetwork
 
class  ReplayItem
 

Functions

 rendertrial (maxiter=NSTEPS, verbose=True)
 

Variables

 batch
 
int BATCH_SIZE = 64
 
 d_batch = np.vstack([b.done for b in batch])
 
float DECAY_RATE = 0.99
 
bool done = False
 
 env = Pendulum(1)
 
 feed_dict
 
list h_qva = []
 
list h_rwd = []
 
list h_ste = []
 
tuple maxq
 
 n_init = tflearn.initializations.truncated_normal(seed=RANDOM_SEED)
 
int NEPISODES = 100
 
int NH1 = 250
 
int NSTEPS = 100
 
 NU = env.nu
 
 NX = env.nobs
 
 optim
 
 policy = PolicyNetwork().setupOptim()
 
float POLICY_LEARNING_RATE = 0.0001
 
 policyTarget = PolicyNetwork().setupTargetAssign(policy)
 
 q2_batch
 
 qgrad
 
 qref_batch = r_batch + (not d_batch) * (DECAY_RATE * q2_batch)
 
 qvalue = QValueNetwork().setupOptim()
 
float QVALUE_LEARNING_RATE = 0.001
 
 qvalueTarget = QValueNetwork().setupTargetAssign(qvalue)
 
 r
 
 r_batch = np.vstack([b.reward for b in batch])
 
 RANDOM_SEED = int((time.time() % 10) * 1000)
 
int REPLAY_SIZE = 10000
 
 replayDeque = deque()
 
float rsum = 0.0
 
 sess = tf.InteractiveSession()
 
 u = sess.run(policy.policy, feed_dict={policy.x: x})
 
 u2_batch
 
 u_batch = np.vstack([b.u for b in batch])
 
 u_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003, seed=RANDOM_SEED)
 
 u_targ = sess.run(policy.policy, feed_dict={policy.x: x_batch})
 
float UPDATE_RATE = 0.01
 
 withSinCos
 
 x = env.reset().T
 
 x2 = x2.T
 
 x2_batch = np.vstack([b.x2 for b in batch])
 
 x_batch = np.vstack([b.x for b in batch])
 

Detailed Description

Deep actor-critic network, from "Continuous control with deep reinforcement learning" by Lillicrap et al., arXiv:1509.02971.
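
The variables documented below are the moving parts of one DDPG update. A minimal sketch of how they fit together, reusing the module-level names from this page and assuming attribute names that are not documented here (qvalue.optim, qvalue.qref, policy.optim, policy.qgradient and the target-assign ops policyTarget.update_variables / qvalueTarget.update_variables), might look like:

    # Hypothetical sketch of one training step; the attribute names listed above are assumptions.
    batch = random.sample(replayDeque, BATCH_SIZE)          # minibatch of ReplayItem
    x_batch = np.vstack([b.x for b in batch])
    u_batch = np.vstack([b.u for b in batch])
    r_batch = np.vstack([b.reward for b in batch])
    d_batch = np.vstack([b.done for b in batch])
    x2_batch = np.vstack([b.x2 for b in batch])

    # Bellman target from the target networks: r + gamma * Q'(x2, pi'(x2)),
    # masked element-wise on terminal transitions.
    u2_batch = sess.run(policyTarget.policy, feed_dict={policyTarget.x: x2_batch})
    q2_batch = sess.run(
        qvalueTarget.qvalue,
        feed_dict={qvalueTarget.x: x2_batch, qvalueTarget.u: u2_batch},
    )
    qref_batch = r_batch + (1.0 - d_batch) * (DECAY_RATE * q2_batch)

    # Critic step, then actor step along dQ/du (deterministic policy gradient).
    sess.run(qvalue.optim, feed_dict={qvalue.x: x_batch, qvalue.u: u_batch, qvalue.qref: qref_batch})
    u_targ = sess.run(policy.policy, feed_dict={policy.x: x_batch})
    qgrad = sess.run(qvalue.gradient, feed_dict={qvalue.x: x_batch, qvalue.u: u_targ})
    sess.run(policy.optim, feed_dict={policy.x: x_batch, policy.qgradient: qgrad})

    # Soft (Polyak) update of the target networks at rate UPDATE_RATE.
    sess.run(policyTarget.update_variables)
    sess.run(qvalueTarget.update_variables)

The remaining variables (maxq, h_rwd, h_ste, h_qva, rsum) appear to be bookkeeping around this loop, recording the largest Q-value seen in the last minibatch and the per-episode reward, step and Q-value histories.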

Function Documentation

◆ rendertrial()

rendertrial(maxiter = NSTEPS, verbose = True)

Definition at line 157 of file continuous.py.
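
The body of rendertrial() is not reproduced on this page. As a plausible sketch only (not the file's actual code, and assuming the Pendulum environment exposes reset(), step() and render()), it rolls out the current greedy policy for at most maxiter steps while rendering:

    def rendertrial(maxiter=NSTEPS, verbose=True):
        # Plausible sketch, not a quote of continuous.py: render one greedy rollout.
        x = env.reset().T                  # row-vector observation, as elsewhere on this page
        rsum = 0.0
        for i in range(maxiter):
            u = sess.run(policy.policy, feed_dict={policy.x: x})  # greedy action, no exploration noise
            x, reward = env.step(u)        # env.step/env.render are assumed Pendulum methods
            x = x.T
            env.render()
            rsum += reward
        if verbose:
            print("Total reward after %d steps: %.2f" % (i + 1, rsum))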

Variable Documentation

◆ batch

batch
Initial value:
= random.sample(replayDeque, BATCH_SIZE)

Definition at line 204 of file continuous.py.

◆ BATCH_SIZE

int BATCH_SIZE = 64

Definition at line 35 of file continuous.py.

◆ d_batch

d_batch = np.vstack([b.done for b in batch])

Definition at line 210 of file continuous.py.

◆ DECAY_RATE

float DECAY_RATE = 0.99

Definition at line 32 of file continuous.py.

◆ done

bool done = False

Definition at line 190 of file continuous.py.

◆ env

env = Pendulum(1)

Definition at line 39 of file continuous.py.

◆ feed_dict

feed_dict

Definition at line 226 of file continuous.py.

◆ h_qva

list h_qva = []

Definition at line 176 of file continuous.py.

◆ h_rwd

list h_rwd = []

Definition at line 175 of file continuous.py.

◆ h_ste

list h_ste = []

Definition at line 177 of file continuous.py.

◆ maxq

tuple maxq
Initial value:
= (
    np.max(
        sess.run(qvalue.qvalue, feed_dict={qvalue.x: x_batch, qvalue.u: u_batch})
    )
    if "x_batch" in locals()
    else 0
)

Definition at line 250 of file continuous.py.

◆ n_init

n_init = tflearn.initializations.truncated_normal(seed=RANDOM_SEED)

Definition at line 24 of file continuous.py.

◆ NEPISODES

int NEPISODES = 100

Definition at line 28 of file continuous.py.

◆ NH1

int NH1 = 250

Definition at line 36 of file continuous.py.

◆ NSTEPS

int NSTEPS = 100

Definition at line 29 of file continuous.py.

◆ NU

NU = env.nu

Definition at line 42 of file continuous.py.

◆ NX

NX = env.nobs

Definition at line 41 of file continuous.py.

◆ optim

optim

Definition at line 240 of file continuous.py.

◆ policy

policy = PolicyNetwork().setupOptim()

Definition at line 143 of file continuous.py.

◆ POLICY_LEARNING_RATE

float POLICY_LEARNING_RATE = 0.0001

Definition at line 31 of file continuous.py.

◆ policyTarget

policyTarget = PolicyNetwork().setupTargetAssign(policy)

Definition at line 144 of file continuous.py.

◆ q2_batch

q2_batch
Initial value:
= sess.run(
    qvalueTarget.qvalue,
    feed_dict={qvalueTarget.x: x2_batch, qvalueTarget.u: u2_batch},
)

Definition at line 217 of file continuous.py.

◆ qgrad

qgrad
Initial value:
= sess.run(
    qvalue.gradient, feed_dict={qvalue.x: x_batch, qvalue.u: u_targ}
)

Definition at line 235 of file continuous.py.

◆ qref_batch

qref_batch = r_batch + (not d_batch) * (DECAY_RATE * q2_batch)

Definition at line 221 of file continuous.py.
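
Element-wise, this is the usual DDPG target: bootstrap with the target critic everywhere except on terminal transitions. Since d_batch is a stacked NumPy array of booleans, the mask has to be applied per transition; an explicitly vectorised formulation (a sketch of the intent, not a quote of the file) is:

    # No bootstrapping where the episode ended (done == True); gamma = DECAY_RATE.
    qref_batch = r_batch + (1.0 - d_batch) * DECAY_RATE * q2_batch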

◆ qvalue

qvalue = QValueNetwork().setupOptim()

Definition at line 146 of file continuous.py.

◆ QVALUE_LEARNING_RATE

float QVALUE_LEARNING_RATE = 0.001

Definition at line 30 of file continuous.py.

◆ qvalueTarget

qvalueTarget = QValueNetwork().setupTargetAssign(qvalue)

Definition at line 147 of file continuous.py.
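
policyTarget and qvalueTarget are slowly tracking copies of the actor and critic; they produce u2_batch and q2_batch so the Bellman target does not chase the networks being optimised. Assuming setupTargetAssign() registers soft-assignment ops blended with UPDATE_RATE under an attribute such as update_variables (an assumed name, not documented here), refreshing them after each optimisation step would look like:

    # Polyak averaging with an assumed attribute name:
    # theta_target <- UPDATE_RATE * theta + (1 - UPDATE_RATE) * theta_target
    sess.run(policyTarget.update_variables)
    sess.run(qvalueTarget.update_variables)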

◆ r

r

Definition at line 188 of file continuous.py.

◆ r_batch

r_batch = np.vstack([b.reward for b in batch])

Definition at line 209 of file continuous.py.

◆ RANDOM_SEED

RANDOM_SEED = int((time.time() % 10) * 1000)

Definition at line 19 of file continuous.py.

◆ REPLAY_SIZE

int REPLAY_SIZE = 10000

Definition at line 34 of file continuous.py.

◆ replayDeque

replayDeque = deque()

Definition at line 139 of file continuous.py.

◆ rsum

float rsum = 0.0

Definition at line 182 of file continuous.py.

◆ sess

sess = tf.InteractiveSession()

Definition at line 149 of file continuous.py.

◆ u

u = sess.run(policy.policy, feed_dict={policy.x: x})

Definition at line 186 of file continuous.py.

◆ u2_batch

u2_batch
Initial value:
= sess.run(
    policyTarget.policy, feed_dict={policyTarget.x: x2_batch}
)

Definition at line 214 of file continuous.py.

◆ u_batch

u_batch = np.vstack([b.u for b in batch])

Definition at line 208 of file continuous.py.

◆ u_init

u_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003, seed=RANDOM_SEED)

Definition at line 25 of file continuous.py.

◆ u_targ

u_targ = sess.run(policy.policy, feed_dict={policy.x: x_batch})

Definition at line 234 of file continuous.py.

◆ UPDATE_RATE

float UPDATE_RATE = 0.01

Definition at line 33 of file continuous.py.

◆ withSinCos

withSinCos

Definition at line 40 of file continuous.py.

◆ x

x = env.reset().T

Definition at line 181 of file continuous.py.

◆ x2

x2 = x2.T

Definition at line 188 of file continuous.py.

◆ x2_batch

x2_batch = np.vstack([b.x2 for b in batch])

Definition at line 211 of file continuous.py.

◆ x_batch

x_batch = np.vstack([b.x for b in batch])

Definition at line 207 of file continuous.py.