Here at Cloudoki we came up with an idea for our #hackfridays: to make a theremin ♪♫ with an Arduino!

So here's how we did it.

Arduino Theremin

We looked inside our Arduino kit for a while and decided to use an LDR sensor and a piezo to create our theremin.

Requirements:

  • Arduino
  • LDR
  • Piezo
  • Resistor
  • Arduino IDE

The Circuit
theremin_circuit

So we set up our theremin following the Fritzing circuit above, resulting in the image below.
theremin_circuit_real
Outside #hackfridays ftw

The code:

// Half-period of the square wave, in microseconds, derived from the LDR.
int halfPeriodUs = 0;
const int piezoPin = 6; // digital pin driving the piezo

// Configure the piezo pin as an output and wait 3 s before sound starts.
void setup() {
  pinMode(piezoPin, OUTPUT);
  delay(3000);
}

// Emit one square-wave cycle per iteration. The LDR on A5 is re-sampled
// between the high and low halves, so the first half of each cycle still
// uses the previous reading.
void loop() {
  digitalWrite(piezoPin, HIGH);
  delayMicroseconds(halfPeriodUs);    // high half (previous reading)
  halfPeriodUs = 4 * analogRead(A5);  // fresh LDR sample scaled x4
  digitalWrite(piezoPin, LOW);
  delayMicroseconds(halfPeriodUs);    // low half (fresh reading)
}

After running the code, the piezo will start playing after 3 seconds. When you move your hand above the LDR to block the light and achieve different light intensities on it, the result is close to an actual theremin.

But how can we improve this?

How about we use the Web Audio API and Johnny-Five for the sound part instead of the piezo, and add a canvas to visualize it... Sounds interesting, right? Let's do this.

Note
You may need to go to File > Examples > Firmata > StandardFirmata and upload it to the Arduino in case you run into an error connecting Johnny-Five with the Arduino.

So now we removed the piezo from the setup and ended up with the following:

theremin_no_piezo
Back to the office

The Node Code

We'll need Johnny Five to connect to the Arduino, use ExpressJS to serve our app and Socket.io to send the values to the Web Audio API.

// Express serves the static client; Socket.IO pushes LDR readings to it.
var express = require('express');
var app = express(); // reuse the module loaded above instead of calling require('express') twice
var http = require('http').Server(app);
var io = require('socket.io')(http);

var five = require("johnny-five");
var board = new five.Board();

// Serve the client-side app from ./public
app.use('/', express.static('public'));

board.on("ready", function() {
  // LDR wired to analog pin A5; on every change, broadcast the reading
  // scaled to the 0..10000 range (fscaleTo keeps fractional precision).
  var sensor = new five.Sensor("A5");
  sensor.on("change", function() {
    io.emit('new input', this.fscaleTo(0, 10000));
  });
});

// Connection logging only — the sensor broadcast above goes to all clients.
io.on('connection', function(socket){
  console.log('a user connected');
  socket.on('disconnect', function(){
    console.log('user disconnected');
  });
});

http.listen(3000, function(){
  console.log('listening on *:3000');
});

The Client side Code

We need to get the AudioContext, create an oscillator node to generate our theremin notes, an analyser node to create the visualizer, and a gain node for volume control. And of course we need Socket.io to get the values from the Arduino.

// Web Audio context (prefixed fallback for older WebKit browsers).
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var oscillator, analyser, gainNode;

// Socket.IO: each 'new input' event carries the scaled LDR reading,
// which drives the oscillator pitch directly (value is in hertz).
var socket = io();
socket.on('new input', function (reading) {
    if (typeof reading !== "number") { return; }
    oscillator.frequency.value = reading;
});

// Build the audio graph (oscillator -> gain -> analyser -> speakers)
// and start the waveform visualizer loop.
function init() {
  // BUG FIX: the original never created the oscillator, so the
  // oscillator.connect() call below threw a TypeError on undefined.
  oscillator = audioCtx.createOscillator();
  gainNode = audioCtx.createGain();
  analyser = audioCtx.createAnalyser();

  // 2048-point FFT -> frequencyBinCount of 1024 time-domain samples per frame.
  analyser.fftSize = 2048;

  // connect oscillator to gain node to analyser to speakers
  oscillator.connect(gainNode);
  gainNode.connect(analyser);
  analyser.connect(audioCtx.destination);

  // create initial theremin frequency and volume values
  var initialFreq = 0;
  var initialVol = 1;

  // set options for the oscillator
  oscillator.type = 'square';
  oscillator.frequency.value = initialFreq; // value in hertz
  oscillator.detune.value = 100; // value in cents
  oscillator.start(0);

  gainNode.gain.value = initialVol;

  // Render the analyser's current time-domain waveform to the canvas,
  // then schedule itself for the next animation frame.
  function draw() {
    var canvas = document.getElementById("canvas_audio");

    var WIDTH = canvas.width;
    var HEIGHT = canvas.height;

    var ctx = canvas.getContext("2d");

    var bufferLength = analyser.frequencyBinCount;
    var dataArray = new Uint8Array(bufferLength);
    analyser.getByteTimeDomainData(dataArray);

    // black background
    ctx.fillStyle = 'rgb(0, 0, 0)';
    ctx.fillRect(0, 0, WIDTH, HEIGHT);

    ctx.lineWidth = 2;
    ctx.strokeStyle = 'rgb(128, 255, 0)';

    ctx.beginPath();

    var sliceWidth = WIDTH * 1.0 / bufferLength;
    var x = 0;

    for (var i = 0; i < bufferLength; i++) {

        // bytes are centered on 128 (silence); normalize around 1.0
        var v = dataArray[i] / 128.0;
        var y = v * HEIGHT / 2;

        if (i === 0) {
            ctx.moveTo(x, y);
        } else {
            ctx.lineTo(x, y);
        }

        x += sliceWidth;
    }

    // close the trace at the vertical midline on the right edge
    ctx.lineTo(canvas.width, canvas.height / 2);
    ctx.stroke();
    window.requestAnimationFrame(draw);
  }

  draw();
}

init();

All that's left to do is build a nice web page for it and you are done. Or are you?

Is this enough?

We need something more... I know!!! Control the voice pitch with it.

So now we'll need to get the microphone audio input and change its pitch (we'll use one of cwilso's Audio-Input-Effects scripts for that).

We'll add the code to get user media and modify the init function to allow switching between synth mode and voice mode:

New Code

// getUserMedia shim for browsers that only expose the prefixed versions.
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var effect, mode;

// Mute toggles only the analyser's link to the speakers; the rest of the
// audio graph keeps running, so the visualizer stays live while muted.
var mute = document.querySelector('.mute');

mute.onclick = function() {
  if (mute.getAttribute('data-muted') !== 'false') {
    // currently muted -> reconnect the speakers
    analyser.connect(audioCtx.destination);
    mute.setAttribute('data-muted', 'false');
    mute.innerHTML = "Mute";
  } else {
    // currently audible -> cut the speaker connection
    analyser.disconnect(audioCtx.destination);
    mute.setAttribute('data-muted', 'true');
    mute.innerHTML = "Unmute";
  }
};

// Socket.IO: route each LDR reading to whichever mode is active.
var socket = io();
socket.on('new input', function (reading) {
    if (typeof reading !== "number") { return; }
    // modes are mutually exclusive, so an else-if chain is equivalent
    if (mode == "voice" && effect.setPitchOffset) {
      effect.setPitchOffset(reading);   // shift the mic pitch
    } else if (mode == "synth") {
      oscillator.frequency.value = reading;  // drive the oscillator in Hz
    }
});

// Mode button: toggles between synth mode (oscillator) and voice mode
// (microphone input pitch-shifted by the Jungle effect).
var playingMode = document.querySelector('.mode');

playingMode.onclick = function() {

  // Always unmute when switching modes so the new mode is audible.
  if (mute.getAttribute('data-muted') === 'true') {
    analyser.connect(audioCtx.destination);
    mute.setAttribute('data-muted', 'false');
    mute.innerHTML = "Mute";
  }

  if (playingMode.getAttribute('data-mode') === 'voice') {
    // voice -> synth: tear down the old graph output, rebuild via init()
    playingMode.setAttribute('data-mode', 'synth');
    playingMode.innerHTML = "Voice Input";
    if (analyser)
      analyser.disconnect(audioCtx.destination);
    init(undefined, "synth");
  } else {
    // synth -> voice: request the microphone, then rebuild via init().
    // (The original also connect()-ed the analyser here, right before
    // disconnecting it again — that redundant connect is removed.)
    playingMode.setAttribute('data-mode', 'voice');
    playingMode.innerHTML = "Synth Input";
    if (analyser)
      analyser.disconnect(audioCtx.destination);
    if (!navigator.getUserMedia) {
      alert('Your browser does not support the Media Stream API');
    } else {
      navigator.getUserMedia(
        {audio: true, video: false},
        function (stream) {
          // Wrap the mic stream as a Web Audio source node.
          var audioSource = audioCtx.createMediaStreamSource(stream);
          init(audioSource, "voice");
        },
        function (error) {
          alert('Unable to get the user media');
        }
      );
    }
  }
};

// Rebuild the audio graph for the requested mode.
//   audioSource - MediaStreamSource node (voice mode only; undefined for synth)
//   newMode     - "synth" or "voice"
// NOTE: the "..." lines are elisions in this blog excerpt — the omitted
// code is the unchanged portions of the earlier init() implementation.
function init(audioSource, newMode) {
  mode = newMode;
  // Inform the server of the active mode (server-side handler not shown here).
  socket.emit("change_mode", mode);
  // create Oscillator and gain node
  if(mode == "synth") {
    oscillator = audioCtx.createOscillator();
  }

  ...
  analyser.getByteTimeDomainData(dataArray);

  // Wire the mode-specific source into the shared gain node.
  switch(mode) {
    case "synth":
      oscillator.connect(gainNode);
      break;
    case "voice":
      // Jungle is cwilso's pitch-shifter effect (external script).
      effect = new Jungle(audioCtx);
      effect.output.connect(gainNode);
      audioSource.connect(effect.input);
      break;
  }

  gainNode.connect(analyser);
  analyser.connect(audioCtx.destination);

  ...
  var initialFreq = 0;
  var initialVol = 1;

  // set options for the oscillator
  if(mode == "synth") {
    oscillator.type = 'square';
    oscillator.frequency.value = initialFreq; // value in hertz
    oscillator.detune.value = 100; // value in cents
    oscillator.start(0);
  }

  ...
  draw();
}
init(undefined, "synth");

theremin_no_piezo_final
Arduino + Nerd + Nerf = happy #hackfridays

You can get the full code here.

See you on the next blog post!