Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: Autapomorph/dino
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: master
Choose a base ref
...
head repository: U1F30C/dino-ai
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: master
Choose a head ref
Can’t automatically merge. Don’t worry, you can still create the pull request.
  • 18 commits
  • 12 files changed
  • 2 contributors

Commits on Nov 24, 2020

  1. bckp

    Eric L. Solis committed Nov 24, 2020
    Copy the full SHA
    e453e54 View commit details

Commits on Nov 25, 2020

  1. implement ga

    U1F30C committed Nov 25, 2020
    Copy the full SHA
    8995b2e View commit details
  2. correct ga

    Eric L. Solis committed Nov 25, 2020
    Copy the full SHA
    5344992 View commit details

Commits on Nov 26, 2020

  1. move ga

    U1F30C committed Nov 26, 2020
    Copy the full SHA
    7584a5f View commit details
  2. Add ANN

    U1F30C committed Nov 26, 2020
    Copy the full SHA
    42a6a93 View commit details
  3. Copy the full SHA
    a01914c View commit details
  4. Update network inputs

    Add case use for testing :(
    U1F30C committed Nov 26, 2020
    Copy the full SHA
    b9e5544 View commit details
  5. Copy the full SHA
    46a2a36 View commit details
  6. Add method to distribute array of weight references into network's ne…

    …urons
    Eric L. Solis committed Nov 26, 2020
    Copy the full SHA
    3f5e5f4 View commit details
  7. Hide player on death

    Eric L. Solis committed Nov 26, 2020
    Copy the full SHA
    b12e4d3 View commit details
  8. End game on death of all players

    Aslo, display actual score again
    Eric L. Solis committed Nov 26, 2020
    Copy the full SHA
    767c0a0 View commit details
  9. Comment out ai algorithms testing, also use CJS exports

    Eric L. Solis committed Nov 26, 2020
    Copy the full SHA
    b608022 View commit details

Commits on Nov 27, 2020

  1. Copy the full SHA
    b99b70d View commit details
  2. Glue everything toghether

    Add trainer
    Add brain to dinos and use it to control movement
    Connect players and ga individuals
    U1F30C committed Nov 27, 2020
    Copy the full SHA
    a9ac77c View commit details
  3. Copy the full SHA
    5520cf4 View commit details
  4. Automatically restart game

    U1F30C committed Nov 27, 2020
    Copy the full SHA
    0ac3722 View commit details
  5. Copy the full SHA
    7d8ba96 View commit details
  6. Attempt to discreetly replace ga for pso

    Eric L. Solis committed Nov 27, 2020
    Copy the full SHA
    a4551d9 View commit details
Showing with 10,452 additions and 26 deletions.
  1. +1 −0 package.json
  2. +50 −0 src/ai/Trainer.js
  3. +23 −0 src/ai/ann/Layer.js
  4. +114 −0 src/ai/ann/Network.js
  5. +44 −0 src/ai/ann/Neuron.js
  6. +77 −0 src/ai/ann/math.js
  7. +120 −0 src/ai/ga.js
  8. +9 −8 src/prefabs/player/InputManager.js
  9. +9 −3 src/prefabs/player/Player.js
  10. +31 −6 src/scenes/game/GameScene.js
  11. +10 −9 src/scenes/game/InputManager.js
  12. +9,964 −0 yarn.lock
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
@@ -24,6 +24,7 @@
"lint:staged": "lint-staged --relative"
},
"dependencies": {
"lodash": "^4.17.20",
"phaser": "^3.24.1"
},
"devDependencies": {
50 changes: 50 additions & 0 deletions src/ai/Trainer.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import { GeneticAlgorithm } from './ga';
import { Network } from './ann/Network';
const { weightsForArchitecture } = require('./ann/math');
const { times } = require('lodash');
/**
 * Wires a genetic algorithm to a population of neural-network "brains".
 * Each GA individual gets a Network whose weight objects alias the
 * individual's genome, so evolving the genome rewires the network in place.
 */
export class Trainer {
  constructor(populationSize) {
    // Layer spec: [inputWidth] followed by [size, activation] per layer.
    const architecture = [[2], [4, 'step'], [2, 'step']];
    const layerSizes = architecture.map(([size]) => size);

    // NOTE: "dimentions" (sic) is the option key GeneticAlgorithm expects.
    this.ga = new GeneticAlgorithm(populationSize, {
      dimentions: weightsForArchitecture(layerSizes),
      min: -10,
      max: 10,
    });

    // Attach a brain to every individual, sharing the genome's weight refs.
    this.population = this.ga.population.map(individual => {
      const brain = Network(architecture);
      brain.distributeWights(individual._genome);
      individual.brain = brain;
      return individual;
    });
  }

  /** Runs one generation of evolution. */
  async train() {
    this.ga.evolve();
  }
}

// Snapshot of the best genome evolved so far for the [2, 4-step, 2-step]
// architecture. Its 22 entries match weightsForArchitecture([2, 4, 2]):
// (2+1)*4 + (4+1)*2 = 22, one bias weight per neuron included.
// Currently unused in this file — presumably intended to seed
// distributeWights for a pre-trained run (TODO confirm with callers).
const bestGenomeSoFarFor2_4_2stepArchitecture = [
  1.490333231205124,
  4.935242073306053,
  -0.8114305730385176,
  -6.984377091898013,
  -9.678558492090549,
  6.399312732780945,
  -2.1735241450040954,
  0.5852112901511575,
  -2.4717308963370277,
  -3.111177095705684,
  -4.27760733936049,
  0.4866304170340854,
  -3.0260548902908724,
  -3.6344324236983017,
  0.9492671870980285,
  0.026422413457698468,
  -2.712895113854894,
  -1.1128790410541978,
  -4.492750554099505,
  2.4425150874251234,
  -2.961236209177085,
  5.580061431941816,
];
23 changes: 23 additions & 0 deletions src/ai/ann/Layer.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
const { chunk } = require('lodash');
const { Neuron } = require('./Neuron');

/**
 * A fully-connected layer: `size` neurons sharing one activation type.
 * Exposes predict (forward pass) and distributeWights (bulk weight install).
 */
function Layer([size, activation]) {
  const neurons = [];
  for (let i = 0; i < size; i++) {
    neurons.push(Neuron(1, activation));
  }

  const layer = { neurons, error: Infinity, predict, distributeWights };

  // Feed the same input vector to every neuron; cache and return the outputs.
  function predict(inputs) {
    const outputs = [];
    for (const neuron of layer.neurons) {
      outputs.push(neuron.predict(inputs));
    }
    layer.output = outputs;
    return outputs;
  }

  // Split a flat weight list into equal consecutive slices, one per neuron.
  // ("Wights" is misspelled but is part of the public interface — keep it.)
  function distributeWights(weights) {
    const perNeuron = weights.length / layer.neurons.length;
    chunk(weights, perNeuron).forEach((slice, i) => {
      layer.neurons[i].weights = slice;
    });
  }

  return layer;
}

module.exports = { Layer };
114 changes: 114 additions & 0 deletions src/ai/ann/Network.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
const { Layer } = require('./Layer');
const { delta, mse, weightsForArchitecture } = require('./math');
const { sum, times } = require('lodash');

// Builds a feed-forward network from layer descriptors: [size] for the input
// layer, [size, activation] for each subsequent layer. Returns a plain object
// (not a class instance) exposing forward/train/converges/distributeWights.
function Network(layerDescriptors, learningRate = 0.5) {
  let layers = [];
  let trainingData = [];
  // The first descriptor only declares input width; Layer objects are built
  // for the hidden/output descriptors.
  layers = layerDescriptors.slice(1).map(Layer);
  const architecture = layerDescriptors.map(([n]) => n);

  const network = {
    layers,
    forward,
    error: Infinity, // last mean-squared error recorded by train()
    trainingData, // array of [inputs, expectedOutputs] pairs
    train,
    converges,
    distributeWights,
  };

  // Hands out consecutive slices of `weightObjects` to each layer.
  // Layer i consumes size_i * (size_{i-1} + 1) entries — the +1 is the bias
  // weight that pairs with Neuron.predict's constant -1 input.
  function distributeWights(weightObjects) {
    let offset = 0;
    for (let i = 1; i < architecture.length; i++) {
      const currentLayerWeightCount = architecture[i] * (architecture[i - 1] + 1);
      layers[i - 1].distributeWights(weightObjects.slice(offset, offset + currentLayerWeightCount));
      offset += currentLayerWeightCount;
    }
  }

  // Standard forward pass: each layer consumes the previous layer's output;
  // returns the final layer's output vector.
  function forward(inputs) {
    const layerOutputs = [layers[0].predict(inputs)];
    layers.slice(1).forEach(layer => {
      layerOutputs.push(layer.predict(layerOutputs.slice(-1)[0]));
    });
    return layerOutputs.slice(-1)[0];
  }

  // True once the last recorded training error drops below the threshold.
  function converges(acceptableError = 0.05) {
    return network.error < acceptableError;
  }

  // One backpropagation sweep over every sample in trainingData.
  // NOTE(review): assumes every neuron's activation defines a delta rule —
  // 'step' has deltaFunction === null and would throw here; confirm step
  // networks are only driven by the GA, never by train().
  function train() {
    const outLayer = network.layers.slice(-1)[0];
    network.trainingData.forEach(([inputs, outputs]) => {
      network.forward(inputs);
      // Output layer: error is the raw difference target - prediction.
      outLayer.neurons.forEach((neuron, i) => {
        neuron.error = outputs[i] - neuron.output;
        neuron.delta = neuron.deltaFunction(neuron.output, neuron.error);
      });

      // Hidden layers, walking backwards from the last hidden layer.
      for (let l = network.layers.length - 2; l >= 0; l--) {
        network.layers[l].neurons.forEach((neuron, i) => {
          // Error = each downstream neuron's delta weighted by the
          // connection from this neuron (its weight at index i).
          neuron.error = sum(
            network.layers[l + 1].neurons.map(function (n) {
              return n.weights[i].value * n.delta;
            }),
          );

          neuron.delta = neuron.deltaFunction(neuron.output, neuron.error);

          // NOTE(review): this adjusts every layer-(l+1) neuron once PER
          // neuron in layer l, so downstream weights are updated multiple
          // times per sample, and layer 0's own weights are never adjusted
          // at all — both look unintended for conventional backprop; confirm.
          network.layers[l + 1].neurons.forEach(nextNeur =>
            nextNeur.adjust(learningRate * nextNeur.delta),
          );
        });
      }
    });
    // Error metric: MSE of the output layer after the final sample only.
    network.error = mse(outLayer.neurons.map(n => n.error));
  }

  return network;
}

module.exports = { Network };

// const network = Network(
// [
// [2],
// [3, 'sigmoid'],
// [1, 'sigmoid'],
// ],
// 0.9,
// );
// const problemSize = weightsForArchitecture([2, 3, 1]);
// const weights = times(problemSize, () => ({ value: Math.random() }));
// network.distributeWights(weights);

// const inputs = [
// [0, 0],
// [0, 1],
// [1, 0],
// [1, 1],
// ];
// const outputs = [[0], [1], [1], [0]];

// network.trainingData = inputs.map((inputSet, i) => [inputSet, outputs[i]]);

// let actualOutputs;
// let i = 0;
// while (!network.converges(0.1)) {
// network.train();
// actualOutputs = inputs
// .map(input =>
// network
// .forward(input)
// .map(x => Math.round(x))
// .join(','),
// )
// .join(';');
// if (i == 10000) {
// console.log(actualOutputs);
// console.log(network.error);
// i = 0;
// }
// i++;
// }
44 changes: 44 additions & 0 deletions src/ai/ann/Neuron.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
const { dot, activations } = require('./math');
const { times } = require('lodash');

/**
 * A single neuron. Weights are `{ value }` wrapper objects so an external
 * optimizer (the GA) can keep references to them and mutate the numbers in
 * place. One extra weight slot is kept for the bias, which is paired with a
 * constant -1 input appended at prediction time.
 */
function Neuron(inputQuantity = 1, type = 'linear') {
  const neuron = {
    weights: times(inputQuantity + 1, () => ({ value: Math.random() })),
    predict,
    adjust,
    inputs: null, // last bias-extended input vector seen
    output: null, // last activation output
    deltaFunction: activations[type].delta,
  };

  // Weighted sum of the bias-extended input vector.
  function weightedSum(inputs) {
    const values = neuron.weights.map(w => w.value);
    return dot(values, inputs);
  }

  // Grow the weight list lazily so the neuron adapts to whatever input
  // width it is asked to predict on (keeping one extra slot for the bias).
  function fill(inputsLength) {
    while (inputsLength >= neuron.weights.length) {
      neuron.weights.push({ value: Math.random() });
    }
  }

  function predict(inputs) {
    fill(inputs.length);
    const extended = [...inputs, -1]; // -1 is the bias input
    neuron.inputs = extended;
    neuron.output = activations[type].function(weightedSum(extended));
    return neuron.output;
  }

  // Nudge every weight along the last seen input by `delta`.
  function adjust(delta) {
    for (let i = 0; i < neuron.weights.length; i++) {
      neuron.weights[i].value += delta * neuron.inputs[i];
    }
  }

  return neuron;
}

module.exports = { Neuron };
77 changes: 77 additions & 0 deletions src/ai/ann/math.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
const { sumBy } = require('lodash');

/**
 * Dot product of two numeric vectors.
 * @param {number[]} v1
 * @param {number[]} v2 - must be at least as long as v1
 * @returns {number} sum of pairwise products (0 for an empty v1)
 */
function dot(v1, v2) {
  return v1.reduce((acc, x, i) => acc + x * v2[i], 0);
}

/**
 * Two points on a neuron's decision line in 2-D input space, for plotting:
 * solves w1*x + w2*y = bias for y at x = -2 and x = 3.
 * NOTE(review): reads `neuron.bias` and numeric `neuron.weights` entries,
 * which does not match Neuron's `{ value }`-wrapped weights (and Neuron has
 * no `bias` field) — presumably written for an earlier neuron shape; confirm
 * before using.
 */
function generateLine(neuron) {
  const [w1, w2] = neuron.weights;
  const { bias } = neuron;
  // y = b/w2 - (w1/w2) * x
  const yAt = x => bias / w2 - (w1 * x) / w2;

  const leftLimit = -2;
  const rightLimit = 3;
  return [
    { x: leftLimit, y: yAt(leftLimit) },
    { x: rightLimit, y: yAt(rightLimit) },
  ];
}

/**
 * Mean of the squared entries of an array (mean squared error when the
 * entries are per-neuron errors).
 * @param {number[]} arr
 * @returns {number} average of the squares (NaN for an empty array)
 */
function mse(arr) {
  let total = 0;
  for (const x of arr) {
    total += x ** 2;
  }
  return total / arr.length;
}

/**
 * Activation table. Each entry pairs the forward `function` with a `delta`
 * rule (derivative times error) used by backpropagation. `step` is not
 * differentiable, so its delta is null and it cannot be used with train().
 */
const activations = {
  step: {
    // Heaviside threshold: 1 for strictly positive input, else 0.
    function: output => (output > 0 ? 1 : 0),
    delta: null,
  },
  sigmoid: {
    function: output => 1 / (Math.exp(-output) + 1),
    // Uses the identity s'(x) = s(x) * (1 - s(x)).
    delta: (output, error) => output * (1 - output) * error,
  },
  relu: {
    function: output => Math.max(0, output),
    delta: (output, error) => (output < 0 ? 0 : 1) * error,
  },
  lrelu: {
    // Leaky ReLU with a fixed 0.1 slope on the negative side.
    function: output => Math.max(0.1 * output, output),
    delta: (output, error) => (output < 0 ? 0.1 : 1) * error,
  },
  linear: {
    function: output => output,
    delta: (output, error) => 1 * error,
  },
};

/**
 * Total number of weights a fully-connected network needs, counting one bias
 * weight per neuron: the sum over adjacent layers of (fanIn + 1) * fanOut.
 * @param {number[]} arch - neuron count per layer, e.g. [2, 4, 2]
 * @returns {number}
 */
function weightsForArchitecture(arch) {
  return arch
    .slice(1)
    .reduce((total, fanOut, i) => total + (arch[i] + 1) * fanOut, 0);
}

module.exports = { dot, activations, mse, weightsForArchitecture };
Loading