src/uk/ac/qmul/eecs/depic/daw/AudioLoader.java @ 4:473da40f3d39 (tip)
changeset:  added html formatting to Daw/package-info.java
author:     Fiore Martin <f.martin@qmul.ac.uk>
date:       Thu, 25 Feb 2016 17:50:09 +0000
parents:    629262395647
children:   (none)
/*
 * Cross-Modal DAW Prototype - Prototype of a simple Cross-Modal Digital Audio Workstation.
 *
 * Copyright (C) 2015 Queen Mary University of London (http://depic.eecs.qmul.ac.uk/)
 *
 * This program is free software: you can redistribute it and/or modify it under the terms of the
 * GNU General Public License as published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with this program.
 * If not, see <http://www.gnu.org/licenses/>.
 */
package uk.ac.qmul.eecs.depic.daw;

import java.io.BufferedInputStream;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.List;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.UnsupportedAudioFileException;
import javax.swing.SwingWorker;

import uk.ac.qmul.eecs.depic.daw.AudioLoader.ReturnObject;

/**
 * A Swing worker that loads an audio file in a separate thread. It returns a ReturnObject when
 * the loading is complete.
 */
public class AudioLoader extends SwingWorker<ReturnObject, Void> {

    public static final int FILE_LOAD_TOTAL_PROGRESS = 100;

    /**
     * The default conversion format used. Also the conversion format returned by
     * {@code getConversionFormat()}.
     */
    public static final AudioFormat DEFAULT_CONVERSION_FORMAT = new AudioFormat(
            8000.0f, // sample rate
            16,      // bits per sample
            1,       // mono
            true,    // signed
            false    // little endian (default for .wav files)
    );

    private File audioFile;
    private int minChunkSize;
    private int maxScaleFactor;

    public AudioLoader(File audioFile, int minChunkSize, int maxScaleFactor) {
        this.audioFile = audioFile;
        this.minChunkSize = minChunkSize;
        this.maxScaleFactor = maxScaleFactor;
    }

    /**
     * Reads the audio file and builds the min and max values for all the chunks of frames.
     */
    @Override
    protected ReturnObject doInBackground() throws Exception {
        /* get all the info about the file format, needed later to estimate the converted file length */
        AudioInputStream originalFile = AudioSystem.getAudioInputStream(audioFile);
        AudioFormat originalAudioFormat = originalFile.getFormat();
        long originalNumTotalFrames = originalFile.getFrameLength();
        float originalFrameSize = originalAudioFormat.getFrameSize();
        float originalFrameRate = originalAudioFormat.getFrameRate();
        float originalNumChannels = originalAudioFormat.getChannels();

        if (originalNumTotalFrames == 0)
            throw new UnsupportedAudioFileException("File Empty");

        /* convert the audio format to the one suitable for parsing chunks and getting min and max
         * from them (see getConversionFormat()) */
        AudioFormat conversionFormat = getConversionFormat();
        if (!AudioSystem.isConversionSupported(conversionFormat, originalAudioFormat)) {
            throw new UnsupportedAudioFileException("Cannot convert file to the following format: " + conversionFormat);
        }
        AudioInputStream convertedFile = AudioSystem.getAudioInputStream(conversionFormat, originalFile);

        /* start parsing the file and building the chunks' minimums and maximums */
        /* all the variables from here on, unless they begin with "original", refer to the converted audio stream */
        byte[] audioBytes = new byte[minChunkSize * conversionFormat.getFrameSize()];

        /* prepare the ByteBuffer, wrapping the byte array, that will be used to read short values */
        ByteBuffer byteBuffer = ByteBuffer.wrap(audioBytes);
        /* set the endianness according to the audio format */
        byteBuffer.order(conversionFormat.isBigEndian() ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN);

        /* estimate the number of frames in the converted file, based on the original file. This is
         * necessary because convertedFile is a stream pointing to the original file and not a file
         * itself, so getFrameLength() returns -1 as it doesn't have any knowledge of the length of
         * the underlying file. The estimated length after conversion is used to allocate enough
         * space for the ArrayList in newFileChunks and to report the progress of the file load. */
        float convertedFrameSize = conversionFormat.getFrameSize();
        float convertedFrameRate = conversionFormat.getFrameRate();
        float convertedNumChannels = conversionFormat.getChannels();
        long convertedNumTotalFrames = (long) (originalNumTotalFrames
                * (convertedFrameSize / originalFrameSize)
                * (convertedFrameRate / originalFrameRate)
                * (convertedNumChannels / originalNumChannels));
        long convertedNumTotalBytes = convertedNumTotalFrames * conversionFormat.getFrameSize();

        /* create the first list of chunks with the smallest size. Array size = audio frames / num frames of minimum chunk */
        WavePeaks newFileChunks = new WavePeaks(maxScaleFactor);
        /* the first List is for scale factor = 1, that is the finest zoom scale */
        newFileChunks.add(1, new ArrayList<Chunk>((int) (convertedNumTotalFrames / minChunkSize) + 1));

        int numBytesRead = 0;
        float totalBytesRead = 0f;
        try (BufferedInputStream chunkBufferedAudio = new BufferedInputStream(convertedFile, audioBytes.length)) {
            while ((numBytesRead = chunkBufferedAudio.read(audioBytes)) != -1) {
                if (isCancelled())
                    return null;
                totalBytesRead += numBytesRead;
                /* normalize the progress value to the total load */
                int progress = (int) ((totalBytesRead / convertedNumTotalBytes) * FILE_LOAD_TOTAL_PROGRESS);
                if (progress < FILE_LOAD_TOTAL_PROGRESS)
                    setProgress(progress);

                /* Now read the byte buffer, backed by audioBytes, and find min and max. The audio
                 * format has been converted to signed 16 bit frames, so each frame can be read as a short value */
                Short currentMax = Short.MIN_VALUE;
                Short currentMin = Short.MAX_VALUE;
                /* find maximum and minimum values in this chunk */
                byteBuffer.clear();
                byteBuffer.limit(numBytesRead);
                while (byteBuffer.hasRemaining()) {
                    Short frame = byteBuffer.getShort();
                    if (frame > currentMax)
                        currentMax = frame;
                    if (frame < currentMin)
                        currentMin = frame;
                }
                newFileChunks.get(1).add(new Chunk(currentMin, currentMax));
            }
        }

        for (int scaleFactor = 2; scaleFactor <= maxScaleFactor; scaleFactor++) {
            List<Chunk> previousList = newFileChunks.get(scaleFactor - 1);
            List<Chunk> newList = new ArrayList<>(previousList.size() / 2 + 1);
            for (int i = 0; i < previousList.size(); i += 2) {
                /* check if we're at the last array item, which happens when the size is odd.
                 * In this case we don't merge two items but just take the last item as a new one */
                if (i == previousList.size() - 1) {
                    newList.add(previousList.get(i));
                    break; // end of the array anyway
                }
                newList.add(new Chunk(previousList.get(i), previousList.get(i + 1)));
            }
            newFileChunks.add(scaleFactor, newList);
        }

        /* open the Sample for playback */
        Sample sample = Daw.getSoundEngineFactory().createSample(audioFile.getAbsolutePath());

        /* return sample and chunks to the event dispatching thread */
        return new ReturnObject(newFileChunks, sample, originalAudioFormat, conversionFormat);
    }

    protected AudioFormat getConversionFormat() {
        return DEFAULT_CONVERSION_FORMAT;
    }

    /**
     * An object returned by the AudioLoader. It contains metadata about the sound sample, such as
     * wave peaks and format, as well as the Sample object representing the loaded sample.
     */
    public static class ReturnObject {
        public ReturnObject(WavePeaks peaks, Sample s, AudioFormat originalFormat, AudioFormat conversionFormat) {
            super();
            this.peaks = peaks;
            this.sample = s;
            this.originalFormat = originalFormat;
            this.conversionFormat = conversionFormat;
        }

        public WavePeaks peaks;
        public Sample sample;
        public AudioFormat originalFormat;
        public AudioFormat conversionFormat;
    }
}
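
A minimal usage sketch for context, not part of the repository file: because AudioLoader is a plain SwingWorker, a caller would typically register a PropertyChangeListener for the "progress" property, call execute(), and pick up the ReturnObject on the event dispatch thread by overriding done(). The file name, chunk size, and scale factor below are illustrative assumptions, not values taken from this project, and the sketch presumes the rest of the application (e.g. Daw's sound engine) is already initialised.

    import java.io.File;
    import uk.ac.qmul.eecs.depic.daw.AudioLoader;
    import uk.ac.qmul.eecs.depic.daw.AudioLoader.ReturnObject;

    public class AudioLoaderUsageSketch {
        public static void main(String[] args) {
            // hypothetical input file and chunk parameters
            AudioLoader loader = new AudioLoader(new File("example.wav"), 512, 8) {
                @Override
                protected void done() {
                    try {
                        // runs on the event dispatch thread once doInBackground() has returned
                        ReturnObject result = get();
                        // result.peaks holds the per-scale min/max chunks, result.sample the playable sample
                        System.out.println("Loaded: " + result.originalFormat);
                    } catch (Exception e) {
                        // cancelled, or loading failed (e.g. UnsupportedAudioFileException)
                        e.printStackTrace();
                    }
                }
            };
            loader.addPropertyChangeListener(evt -> {
                if ("progress".equals(evt.getPropertyName())) {
                    // evt.getNewValue() is an Integer below AudioLoader.FILE_LOAD_TOTAL_PROGRESS
                    System.out.println("progress: " + evt.getNewValue());
                }
            });
            loader.execute();
        }
    }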