Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,5 @@ scripts/*.pdf
scripts/warp_test.py
.pytest_cache/
venv-test/
thirdparty/rerun-loader-python-example-urdf/
thirdparty/rerun-loader-python-example-urdf/
thirdparty/curobo/
397 changes: 397 additions & 0 deletions jrl/deprecated.py

Large diffs are not rendered by default.

139 changes: 0 additions & 139 deletions jrl/math_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,145 +25,6 @@
)


class QP:
    """Batched primal active-set solver for convex quadratic programs.

    Solves, independently for each problem in the batch,

        min_x  1/2 x^T Q x + p^T x
        s.t.   G x <= h        (inequality constraints)
               A x  = b        (equality constraints -- not implemented)
    """

    def __init__(self, Q, p, G, h, A=None, b=None):
        """Store and validate the batched problem data.

        Args:
            Q (torch.Tensor): [nbatch x dim x dim] positive semidefinite cost matrices
            p (torch.Tensor): [nbatch x dim] linear cost vectors
            G (torch.Tensor): [nbatch x nc x dim] inequality constraint matrices
            h (torch.Tensor): [nbatch x nc] inequality constraint bounds
            A (torch.Tensor, optional): [nbatch x neq x dim] equality constraint
                matrices. Defaults to None.
            b (torch.Tensor, optional): [nbatch x neq] equality constraint vectors.
                Defaults to None.

        Raises:
            NotImplementedError: If equality constraints (A or b) are provided.
        """
        self.nbatch = Q.shape[0]
        self.dim = Q.shape[1]
        self.nc = G.shape[1]
        self.Q = Q
        assert len(p.shape) == 2, f"p.shape: {p.shape}"
        self.p = p.unsqueeze(2)  # stored as a column vector per batch element
        self.G = G
        assert len(h.shape) == 2, f"h.shape: {h.shape}"
        self.h = h.unsqueeze(2)  # stored as a column vector per batch element

        assert (
            self.nbatch == self.p.shape[0] == self.G.shape[0] == self.h.shape[0] == self.Q.shape[0]
        ), f"{self.nbatch}, {self.p.shape[0]}, {self.G.shape[0]}, {self.h.shape[0]}, {self.Q.shape[0]}"
        assert (
            self.dim == self.p.shape[1] == self.G.shape[2] == self.Q.shape[1] == self.Q.shape[2]
        ), f"{self.dim}, {self.p.shape[1]}, {self.G.shape[2]}, {self.Q.shape[1]}, {self.Q.shape[2]}"
        assert self.nc == self.G.shape[1] == self.h.shape[1]

        if A is not None or b is not None:
            raise NotImplementedError("Equality constraints not implemented")
        self.A = A
        self.b = b

    def solve(self, trace=False, iterlimit=None):
        """Solve all QPs in the batch with a primal active-set method.

        Iteration starts from x = 0, which is assumed to be feasible
        (G @ 0 <= h) -- NOTE(review): confirm callers guarantee this.

        Args:
            trace (bool): If True, additionally return the list of [nbatch x dim]
                iterates (including the initial point). Defaults to False.
            iterlimit (int, optional): Maximum number of iterations. Defaults to
                10 * nc.

        Returns:
            torch.Tensor: [nbatch x dim] solutions, or (solutions, trace) when
            trace=True.

        Raises:
            RuntimeError: If any problem in the batch fails to converge within
                iterlimit iterations.
        """
        x = torch.zeros((self.nbatch, self.dim, 1), dtype=torch.float32, device=self.Q.device)
        if trace:
            # BUGFIX: record the initial iterate as [nbatch x dim], consistent with
            # the entries appended below (previously the raw [nbatch x dim x 1]
            # tensor was stored, giving the first trace entry a different shape).
            trace = [x.squeeze(2)]
        # working_set[i, j] == True <=> constraint j is treated as active (equality)
        # for batch element i on this iteration.
        working_set = torch.zeros((self.nbatch, self.nc, 1), dtype=torch.bool, device=x.device)
        converged = torch.zeros((self.nbatch, 1, 1), dtype=torch.bool, device=x.device)

        if iterlimit is None:
            iterlimit = 10 * self.nc

        iterations = 0
        while not torch.all(converged):
            # Equality system for the current working set (inactive rows zeroed out).
            A = self.G * working_set
            b = self.h * working_set

            g = self.Q.bmm(x) + self.p
            h = A.bmm(x) - b

            # KKT solve for the step p and the multipliers lmbd of the working-set
            # constraints. The diag term makes the system nonsingular for the
            # zeroed-out (inactive) rows without affecting active ones.
            AT = A.transpose(1, 2)
            AQinvAT = A.bmm(torch.linalg.solve(self.Q, AT))
            rhs = h - A.bmm(torch.linalg.solve(self.Q, g))
            diag = torch.diag_embed(~working_set.squeeze(dim=2))
            lmbd = torch.linalg.solve(AQinvAT + diag, rhs)
            p = torch.linalg.solve(self.Q, -AT.bmm(lmbd) - g)

            # A (near-)zero step means x minimizes the QP restricted to the
            # current working set.
            at_ws_sol = torch.linalg.norm(p, dim=1, keepdim=True) < 1e-3

            # Most negative multiplier among active constraints (inf elsewhere);
            # a negative multiplier means the constraint blocks further descent
            # and should be released.
            lmbdmin, lmbdmin_idx = torch.min(
                torch.where(
                    at_ws_sol & working_set,
                    lmbd,
                    torch.inf,
                ),
                dim=1,
                keepdim=True,
            )

            # Converged when at the working-set solution and all multipliers >= 0.
            converged = converged | (at_ws_sol & (lmbdmin >= 0))

            # remove inverted constraints from working set
            working_set = working_set.scatter(
                1,
                lmbdmin_idx,
                (~at_ws_sol | (lmbdmin >= 0)) & working_set.gather(1, lmbdmin_idx),
            )

            # check constraint violations: step length to the nearest inactive
            # constraint boundary along p
            mask = ~at_ws_sol & ~working_set & (self.G.bmm(p) > 0)
            alpha, alpha_idx = torch.min(
                torch.where(
                    mask,
                    (self.h - self.G.bmm(x)) / (self.G.bmm(p)),
                    torch.inf,
                ),
                dim=1,
                keepdim=True,
            )

            # add violated constraints to working set
            working_set = working_set.scatter(
                1,
                alpha_idx,
                working_set.gather(1, alpha_idx) | (~at_ws_sol & (alpha < 1)),
            )
            alpha = torch.clamp(alpha, max=1)

            # update solution (frozen once converged or at the working-set solution)
            x = x + alpha * p * (~at_ws_sol & ~converged)
            if trace:
                trace.append(x.squeeze(2))

            iterations += 1
            if iterations > iterlimit:
                raise RuntimeError(
                    f"Failed to converge in {iterlimit} iterations\n\n{torch.sum(~converged).item()} out of"
                    f" {self.nbatch} not converged"
                )

        if trace:
            return x.squeeze(2), trace

        return x.squeeze(2)


# batch*n
def normalize_vector(v: torch.Tensor, return_mag: bool = False):
batch = v.shape[0]
Expand Down
40 changes: 40 additions & 0 deletions jrl/obstacles.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@



@dataclass
class CuboidObstacle:
    """A single axis-aligned cuboid obstacle.

    TODO: implement. Presumably holds xyz-min/xyz-max extents and a pose --
    confirm against the [6]-element ``cuboid`` tensor and [4 x 4] ``Tcuboid``
    used by ``env_collision_distances``.
    """


class BatchedCuboidObstacle:
    """A batch of cuboid obstacles stored in tensor form.

    TODO: implement. Intended as the batched counterpart of a single cuboid
    obstacle -- fields and layout to be defined.
    """



def env_collision_distances(self, x: torch.Tensor, cuboid: torch.Tensor, Tcuboid: torch.Tensor) -> torch.Tensor:
    """Returns the distance between the robot collision capsules and the environment cuboid obstacle for each joint
    angle vector in x.

    Args:
        x (torch.Tensor): [n x ndofs] joint angle vectors
        cuboid (torch.Tensor): [6] cuboid xyz min and xyz max
        Tcuboid (torch.Tensor): [4 x 4] cuboid poses

    Returns:
        torch.Tensor: [n x n_capsules] distances

    Raises:
        NotImplementedError: Always -- not yet implemented.
    """
    # TODO: implement. BUGFIX: the bare `TODO` identifier raised a NameError
    # before the intended NotImplementedError, and the message named the wrong
    # method (copy-paste from self_collision_distances_jacobian).
    raise NotImplementedError("Implement env_collision_distances")

def env_collision_distances_jacobian(
    self, x: torch.Tensor, cuboid: torch.Tensor, Tcuboid: torch.Tensor
) -> torch.Tensor:
    """Returns the jacobian of the robot-capsule-to-cuboid distances with respect to the joint angles in x.

    Args:
        x (torch.Tensor): [n x ndofs] joint angle vectors
        cuboid (torch.Tensor): [6] cuboid xyz min and xyz max
        Tcuboid (torch.Tensor): [4 x 4] cuboid poses

    Raises:
        NotImplementedError: Always -- not yet implemented.
    """
    # TODO: implement. BUGFIX: the bare `TODO` identifier raised a NameError
    # before the intended NotImplementedError; message now names this method.
    raise NotImplementedError("Implement env_collision_distances_jacobian")







Loading