import torch
import torch.nn as nn
import torch.optim
import tensorly as tl
from tensorly.decomposition import non_negative_tucker
tl.set_backend('pytorch')
# Toy dataset: 8 points in 2-D forming two well-separated clusters, with binary labels.
X = torch.tensor([[1, 2], [2, 2], [2, 1], [1, 1], [4, 4], [4, 5], [5, 4], [5, 5]]).float()
y = torch.tensor([1, 1, 1, 1, 0, 0, 0, 0]).long()
class Perceptron_sigmod(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        # Single linear layer; hidden_features is kept in the signature but unused here.
        self.layer1 = nn.Linear(in_features, out_features)
        self.s = nn.Sigmoid()

    def forward(self, x):
        x = self.layer1(x)
        # Non-negative Tucker decomposition of the layer output.
        # Note: rank=[8, 2] matches the shape of x for this toy batch of 8 samples,
        # so the core has the same shape as x.
        core, factors = non_negative_tucker(x, rank=[8, 2])
        # Gate the linear output elementwise with the sigmoid of the Tucker core.
        output = x * self.s(core)
        return output
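# Illustrative sketch (not part of the original script): what non_negative_tucker
# returns for a full-rank request on a small random non-negative matrix. The core
# keeps the shape of the input and one factor matrix comes back per mode.
demo_core, demo_factors = non_negative_tucker(torch.rand(8, 2), rank=[8, 2])
print(demo_core.shape, [f.shape for f in demo_factors])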
perceptron_s = Perceptron_sigmod(2, 4, 2)
optimizer1 = torch.optim.SGD(perceptron_s.parameters(), lr=0.01)
c2 = torch.nn.CrossEntropyLoss()
for step in range(1000):
    optimizer1.zero_grad()
    output = perceptron_s(X)
    loss1 = c2(output, y)
    loss1.backward()
    optimizer1.step()
    # Report progress every 100 steps instead of printing on every iteration.
    if step % 100 == 0:
        print(step, loss1.item())
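# Quick sanity check (not in the original script): compare the predicted classes
# with the labels after training. argmax over the 2 output scores gives the class.
with torch.no_grad():
    preds = perceptron_s(X).argmax(dim=1)
print('predictions:', preds.tolist())
print('targets:    ', y.tolist())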