Monday, December 17, 2012

SpringObjectFactory Error when Creating an Application with Struts 2.3.7

Recently I tried working with Struts 2 (version 2.3.7). The distribution's lib directory gives the impression that all of its JARs are required at runtime, but that is not the case: copying them all into the web application causes the following Spring-related exception.
org.apache.catalina.core.StandardContext filterStart
SEVERE: Exception starting filter struts2
Class: com.opensymphony.xwork2.spring.SpringObjectFactory
File: SpringObjectFactory.java
Method: getClassInstance
Line: 230 - com/opensymphony/xwork2/spring/SpringObjectFactory.java:230:-1
    at org.apache.struts2.dispatcher.Dispatcher.init(Dispatcher.java:483)
    at org.apache.struts2.dispatcher.ng.InitOperations.initDispatcher(InitOperations.java:74)
    at org.apache.struts2.dispatcher.ng.filter.StrutsPrepareAndExecuteFilter.init(StrutsPrepareAndExecuteFilter.java:51)
    at org.apache.catalina.core.ApplicationFilterConfig.getFilter(ApplicationFilterConfig.java:295)
    at org.apache.catalina.core.ApplicationFilterConfig.setFilterDef(ApplicationFilterConfig.java:422)
    at org.apache.catalina.core.ApplicationFilterConfig.<init>(ApplicationFilterConfig.java:115)
    at org.apache.catalina.core.StandardContext.filterStart(StandardContext.java:4072)
    at org.apache.catalina.core.StandardContext.start(StandardContext.java:4726)
    at org.apache.catalina.core.ContainerBase.start(ContainerBase.java:1057)
    at org.apache.catalina.core.StandardHost.start(StandardHost.java:840)
    at org.apache.catalina.core.ContainerBase.start(ContainerBase.java:1057)
    :754)
    at org.apache.catalina.startup.Catalina.start(Catalina.java:595)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

Caused by: java.lang.NullPointerException
    at com.opensymphony.xwork2.spring.SpringObjectFactory.getClassInstance(SpringObjectFactory.java:230)
    at org.apache.struts2.dispatcher.Dispatcher.init_PreloadConfiguration(Dispatcher.java:429)
    at org.apache.struts2.dispatcher.Dispatcher.init(Dispatcher.java:471)
    ... 20 more
This exception puzzles many people because it appears to be related to Spring. To solve it, keep only the following JARs in the WEB-INF/lib directory:
asm-3.3.jar
asm-commons-3.3.jar
asm-tree-3.3.jar
commons-fileupload-1.2.2.jar
commons-io-2.0.1.jar
commons-lang3-3.1.jar
freemarker-2.3.19.jar
javassist-3.11.0.GA.jar
ognl-3.0.5.jar
struts2-core-2.3.7.jar
xwork-core-2.3.7.jar
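
For context, the struts2 filter that fails to start in the log above is the one registered in web.xml; the filter-name matches the name in the exception message. A typical registration looks like this (shown only for reference, it is not part of the fix):

<filter>
    <filter-name>struts2</filter-name>
    <filter-class>org.apache.struts2.dispatcher.ng.filter.StrutsPrepareAndExecuteFilter</filter-class>
</filter>
<filter-mapping>
    <filter-name>struts2</filter-name>
    <url-pattern>/*</url-pattern>
</filter-mapping>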

Tuesday, November 13, 2012

Storing Apache Hadoop WordCount Example Output to a Database

The Apache Hadoop WordCount example is the "Hello, World!" of Hadoop, which makes it a good vehicle for demonstrating how to sink Hadoop output into a database. The database I used is MySQL, and the DDL for the table is as follows:

CREATE TABLE word_count(word VARCHAR(254), count INT);
After creating the table, I wrote the following Hadoop job, along with a Mapper and a Reducer, to sink the output to the database. It uses DBOutputFormat as the OutputFormat and DBConfiguration to specify the database connection parameters.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;
import org.apache.hadoop.mapred.lib.db.DBWritable;

public class WordCount {
    public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, DBOutput, IntWritable> {
        private static IntWritable one = new IntWritable(1);
        private static DBOutput text = new DBOutput();
        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<DBOutput, IntWritable> collector, Reporter reporter)
                throws IOException {
            // Emit each token with a count of one
            StringTokenizer token = new StringTokenizer(value.toString());
            while (token.hasMoreTokens()) {
                text.setText(token.nextToken());
                collector.collect(text, one);
            }
        }
        
    }
    
    public static class WordCountReducer extends MapReduceBase implements Reducer<DBOutput, IntWritable, DBOutput, IntWritable> {

        
        @Override
        public void reduce(DBOutput key, Iterator<IntWritable> values,
                OutputCollector<DBOutput, IntWritable> collector, Reporter reporter)
                throws IOException {
            // Sum all the counts emitted for this word
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            // Emit a fresh key carrying both the word and its total
            DBOutput dbKey = new DBOutput();
            dbKey.setText(key.getText());
            dbKey.setNo(sum);
            collector.collect(dbKey, new IntWritable(sum));
        }
        
    }
    
    public void run(String inputPath, String outputPath) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");
        // Ship the MySQL JDBC driver jar to the tasks' classpath
        DistributedCache.addFileToClassPath(new Path("<Absolute Path>/mysql-connector-java-5.1.7-bin.jar"), conf);

        // the keys are DBOutput
        conf.setOutputKeyClass(DBOutput.class);
        // the values are counts (ints)
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(WordCountMapper.class);
        conf.setReducerClass(WordCountReducer.class);        
        
        conf.setOutputFormat(DBOutputFormat.class);

        FileInputFormat.addInputPath(conf, new Path(inputPath));
        DBOutputFormat.setOutput(conf, "word_count", "word", "count");
        
        DBConfiguration.configureDB(conf, "com.mysql.jdbc.Driver", "jdbc:mysql://localhost:3306/sample", "root", "root");
        
        // No FileOutputFormat output path is needed; DBOutputFormat writes straight to the database
        //FileOutputFormat.setOutputPath(conf, new Path(outputPath));

        JobClient.runJob(conf);
      }
    
    public static void main(String[] args) throws Exception {
        WordCount wordCount = new WordCount();
        wordCount.run(args[0], args[1]);
    }
    
    private static class DBOutput implements DBWritable, WritableComparable<DBOutput> {
        
        private String text;
        
        private int no;

        @Override
        public void readFields(ResultSet rs) throws SQLException {
            text = rs.getString("word");
            no = rs.getInt("count");
        }

        @Override
        public void write(PreparedStatement ps) throws SQLException {
            ps.setString(1, text);
            ps.setInt(2, no);
        }
        
        public void setText(String text) {
            this.text = text;
        }
        
        public String getText() {
            return text;
        }
        
        public void setNo(int no) {
            this.no = no;
        }
        
        public int getNo() {
            return no;
        }

        @Override
        public void readFields(DataInput input) throws IOException {
            text = input.readUTF();    
            no = input.readInt();
        }

        @Override
        public void write(DataOutput output) throws IOException {
            output.writeUTF(text);
            output.writeInt(no);
        }

        @Override
        public int compareTo(DBOutput o) {
            return text.compareTo(o.getText());
        }
        
    }
}
Furthermore, I have written a custom Hadoop key type, DBOutput, which implements both DBWritable and WritableComparable, and used it as the output key class. The command to run the job is as follows:
./bin/hadoop jar <Path to Jar>/HadoopTest.jar WordCount <Input Folder> <Dummy Output Folder>
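
Once the job finishes, the results can be checked directly in MySQL, for example:

SELECT word, `count` FROM word_count ORDER BY `count` DESC LIMIT 10;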

Monday, November 12, 2012

Hadoop vs. Cassandra and HBase

Vanilla Hadoop consists of a Distributed File System (DFS) at its core plus libraries that support the MapReduce model for writing analysis programs. The DFS is what makes Hadoop scalable: it takes care of chunking data across the nodes of a multi-node cluster so that MapReduce can work on the individual chunks available on each node, thus enabling parallelism.
The paper on the Google File System, which was the basis for the Hadoop Distributed File System (HDFS), can be found here
The paper on the MapReduce model can be found here

For a detailed explanation of MapReduce, read this post
Cassandra is a highly scalable, eventually consistent, distributed, structured key-value store. It is not a conventional database; it is more like a Hashtable or HashMap that stores key/value pairs. HBase is an open-source implementation of Google's BigTable, and Cassandra borrows BigTable's data model as well. The paper for Google BigTable can be found here.
BigTable uses a Sorted String Table (SSTable) to store key/value pairs. An SSTable is just a file (in HBase's case, a file in HDFS) that stores each key followed by its value. BigTable also maintains an index mapping each key to its offset in the file, which allows the value for a key to be read with a single seek to that offset. An SSTable is effectively immutable: after the file is created, existing key/value pairs are never modified. New key/value pairs are appended to the file, and updates and deletes are appends too, an update as a newer key/value pair and a delete as the key with a tombstone value. Duplicate keys are therefore allowed in the file. Whenever an update or delete takes place, the index is adjusted so that the offset for that key points to the latest value or the tombstone.
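
To make the mechanism concrete, here is a toy sketch of the SSTable idea in plain Java: writes are appends, an in-memory index maps each key to a file offset, and a read is one index lookup plus one seek. This is only an illustration of the concept, not how Cassandra or HBase actually implement it.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.HashMap;
import java.util.Map;

public class ToySSTable {
    private final RandomAccessFile file;
    // In-memory index: key -> offset of that key's latest value in the file
    private final Map<String, Long> index = new HashMap<String, Long>();

    public ToySSTable(String path) throws IOException {
        this.file = new RandomAccessFile(path, "rw");
    }

    // Writes are append-only; a duplicate key simply shadows the older entry in the index
    public void put(String key, String value) throws IOException {
        file.seek(file.length());
        file.writeUTF(key);
        long offset = file.getFilePointer();
        file.writeUTF(value);
        index.put(key, offset);
    }

    // A read is one index lookup plus a single seek to the recorded offset
    public String get(String key) throws IOException {
        Long offset = index.get(key);
        if (offset == null) {
            return null;
        }
        file.seek(offset);
        return file.readUTF();
    }
}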

Thus you can see that Cassandra's and HBase's internals allow fast reads and writes, which is crucial for real-time data handling, whereas vanilla Hadoop with MapReduce is suited to batch processing of passive data.

Saturday, November 3, 2012

Distance Scanner - Low Cost Walking Aid for the Blind

I got some time to work with Arduino and put together a small project to help blind people navigate around obstacles. The Arduino community has made many attempts at this, but most of them use motors or servo motors for feedback. Blind users have to get used to the different inputs from these devices, and driving a motor requires a lot of battery power, so users may end up changing batteries frequently.

Furthermore, many blind people already navigate using the sound that comes from tapping a white cane, so sound is a form of feedback they are familiar with.

My approach uses an ultrasonic ping sensor and a buzzer to warn the person wearing it with a buzzing sound. The items required are:

  1. Arduino (Any Model)
  2. Ultrasonic Ping Sensor
  3. Buzzer or Piezo Speaker
  4. Jumper Cables

The Arduino sketch source code is below:

/* 
  Distance Scanner 
  By Shazin Sadakath
*/

#define BUZZER 10

int URPWM = 3;                  // PWM output pin of the ultrasonic sensor
int URTRIG = 5;                 // trigger pin of the ultrasonic sensor

int minDistanceCm = 50;         // buzz when an obstacle is closer than this

unsigned int Distance = 0;
uint8_t EnPwmCmd[4] = {0x44, 0x02, 0xbb, 0x01};  // sensor PWM-mode command (not used in this sketch)

void setup() {
  pinMode(URTRIG, OUTPUT);
  digitalWrite(URTRIG, HIGH);   // keep the trigger high until a reading is requested

  pinMode(URPWM, INPUT);

  pinMode(BUZZER, OUTPUT);
}

void loop() {
  // A falling edge on the trigger pin starts a measurement
  digitalWrite(URTRIG, LOW);
  digitalWrite(URTRIG, HIGH);

  // The sensor reports distance as the width of a low pulse: 50 us per cm
  unsigned long DistanceMeasured = pulseIn(URPWM, LOW);

  if (DistanceMeasured == 50000) {
    // Invalid reading; keep the previous distance
  } else {
    Distance = DistanceMeasured / 50;
  }
  delay(20);
  buzzBasedOnDistance();
}

void buzzBasedOnDistance() {
  if (Distance <= minDistanceCm) {
    // analogWrite gives a real PWM duty cycle; digitalWrite with a value of 200 only means HIGH
    analogWrite(BUZZER, 200);
  } else {
    analogWrite(BUZZER, 0);     // silence
  }
}

There is a configurable minimum distance threshold; when an object gets closer than that, the buzzer sounds so the obstacle can be avoided. Check the video below for a small demonstration.




The USB cable is only used for power; it can be replaced with a battery pack powering the Arduino.

Constructive criticism is always welcome!


Friday, October 19, 2012

Best Practice when Writing XML Content to Files

For performance reasons, many developers create XML content with a traditional StringBuffer or StringBuilder, appending values between start and end tags. This has a performance advantage over using an XML library API to generate the XML, both memory-wise and computation-wise. But many developers are unaware of a side effect this method can have.

For example, suppose you create the XML content by appending strings and write it out as follows:

        StringBuilder xml = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
        xml.append("<envelope>");
        xml.append(String.format("<header>%s</header>", "Important"));
        xml.append(String.format("<body>%s</body>", "Hello, World!"));
        xml.append("</envelope>");
        BufferedWriter bw = null;
        try {
            // FileWriter always uses the platform default charset, ignoring the declared UTF-8
            bw = new BufferedWriter(new FileWriter("message.xml"));
            bw.write(xml.toString());
        } catch(Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if(bw != null) {
                    bw.close();
                }
            } catch (IOException e) {                
                e.printStackTrace();
            }
        }

The above code works only as long as the XML content contains characters supported by the default character set of the operating system on which it runs. If that character set is not UTF-8, then as soon as the content includes something like "CAFÉ", the file no longer matches its declared encoding: web browsers will refuse to open it and XML parsing APIs will fail on it.

To fix this, you can either ensure the content never includes characters outside the operating system's default character set, or, if you want UTF-8 in all cases, change the writing code as follows:

        BufferedWriter bw = null;
        try {
            FileOutputStream fos = new FileOutputStream("message.xml");
            // Explicitly encode the output as UTF-8, regardless of the platform default
            bw = new BufferedWriter(new OutputStreamWriter(fos, "UTF-8"));
            bw.write(xml.toString());
        } catch(Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if(bw != null) {
                    bw.close();
                }
            } catch (IOException e) {                
                e.printStackTrace();
            }
        }

In this scenario we explicitly state, via the OutputStreamWriter, that the content must be written in the UTF-8 character set.
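
If you are on Java 7 or later, the same explicit UTF-8 write can be expressed more concisely with try-with-resources, which also closes the stream automatically. A sketch, equivalent to the code above (it needs java.nio.charset.StandardCharsets, available since Java 7):

        try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
                new FileOutputStream("message.xml"), StandardCharsets.UTF_8))) {
            bw.write(xml.toString());
        } catch (IOException e) {
            e.printStackTrace();
        }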

Friday, October 5, 2012

Usage of ThreadLocal in Java

The ThreadLocal class is an important yet often neglected class in the Java standard library. Many developers don't understand its usage, so this is my attempt to explain it.

The definition of this class in official javadoc is

"This class provides thread-local variables. These variables differ from their normal counterparts in that each thread that accesses one (via its get or set method) has its own, independently initialized copy of the variable"

So, according to the definition, if an instance of ThreadLocal is accessed by different threads, each thread gets its own copy of the variable, neither shared with nor visible to the other threads. Simply put, this class enables thread confinement of variables. The usual approach developers take to give threads independent variables is to create a separate object per thread, one that implements Runnable or extends Thread, holding the variable as an instance field.

ThreadLocal, by contrast, is ideal when you want to create one object, share it across different threads, and still have per-thread variables. The following example shows the class at work.

public class ThreadLocalTest implements Runnable {
    private static ThreadLocal<Integer> no = new ThreadLocal<Integer>() {
        @Override
        protected Integer initialValue() {
            return 0;
        }
    };
    
    public static void main(String[] args) {
        Thread t1 = null;
        // Single Object
        ThreadLocalTest test = new ThreadLocalTest();
        for(int i=0;i<10;i++) {
            // test object is used in 10 Threads
            t1 = new Thread(test);        
            t1.start();
        }        
    }
    
    public void run() {
        System.out.printf("Before %s : %d\n", Thread.currentThread().getName(), no.get());
        no.set(no.get() + 1);
        System.out.printf("After %s : %d\n", Thread.currentThread().getName(), no.get());
    }
}

Only one object is created and used by 10 different threads, yet each thread has its own independently initialized variable 'no'.
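
A classic real-world use of ThreadLocal is giving each thread its own SimpleDateFormat, since SimpleDateFormat is not thread-safe. A minimal sketch:

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateFormatter {
    // One SimpleDateFormat per thread, because the class is not thread-safe
    private static final ThreadLocal<SimpleDateFormat> FORMAT = new ThreadLocal<SimpleDateFormat>() {
        @Override
        protected SimpleDateFormat initialValue() {
            return new SimpleDateFormat("yyyy-MM-dd");
        }
    };

    public static String format(Date date) {
        // Safe to call from any thread; each thread formats with its own instance
        return FORMAT.get().format(date);
    }
}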

Saturday, September 29, 2012

Google Code Jam 2012 Practice - Reverse Words

In order to improve my algorithm skills, I tried some of the practice challenges offered by Google Code Jam 2012. I wrote the following code to solve the Reverse Words challenge and verified the output by submitting it to the Google Code Jam page; it is correct.

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;

/**
 * 
 * @author Shazin Sadakath
 *
 */
public class CodeJamSolution2 {
    private File inFile;
    private File outFile;
    
    public static void main(String[] args) {
        if(args.length < 2) {
            System.out.println("Usage : java CodeJamSolution2 <In File> <Out File>");
            return;
        }
        CodeJamSolution2 cjs2 = new CodeJamSolution2(args[0], args[1]);
        cjs2.start();
    }
    
    public CodeJamSolution2(String inFile, String outFile) {
        this.inFile = new File(inFile);
        this.outFile = new File(outFile);
    }
    
    public void start() {
        BufferedReader br = null;
        BufferedWriter bw = null;
        try {
            br = new BufferedReader(new FileReader(inFile));
            bw = new BufferedWriter(new FileWriter(outFile));
            int count = Integer.parseInt(br.readLine());
            String[] words = null;
            for(int i=0;i<count;i++) {
                words = br.readLine().split(" ");
                reverse(words);
                bw.write(String.format("Case #%d: %s\n", i+1, output(words)));
            }
        } catch(Exception e) {
            e.printStackTrace();
        } finally {
            if(br != null) {
                try {
                    br.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            if(bw != null) {
                try {
                    bw.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
    
    /**
     * Reverses a word array in place using n/2 swaps, i.e. O(n) time.
     * 
     * @param words the array to reverse
     */
    public void reverse(String[] words) {
        int i=0;
        int j=words.length - 1;
        String temp = null;
        while(i < j) {
            temp = words[i];
            words[i] = words[j];
            words[j] = temp;
            i++;
            j--;
        }
    }
    
    /**
     * Joins all the elements of the array into a single space-separated sentence.
     * 
     * @param words the words to join
     * @return the joined sentence
     */
    private String output(String[] words) {
        StringBuilder sb = new StringBuilder();
        for(int i=0;i<words.length;i++) {
            sb.append(words[i]);
            if(i != words.length - 1) {
                sb.append(" ");
            }
        }
        return sb.toString();
    }
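
For example, given an input file containing

3
this is a test
foobar
all your base

the program writes the following to the output file:

Case #1: test a is this
Case #2: foobar
Case #3: base your all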
}