
Friday, January 30, 2009

How to Upload Multiple Files in Java

package com.mp.ws;

/**
*
* @author nitinaggarwal
*
*/
public interface IFileUpload {

    /** Returns the contents of a single file as a byte array. */
    public byte[] get1File(String name);

    /** Returns the contents of several files, one byte array per file name. */
    public byte[][] getMultipleFiles(String[] names);

    /** Returns the contents of a text (XML) file as a String. */
    public String getXmlFile(String name);

}

package com.mp.ws;

/**
*
* @author nitinaggarwal
*
*/
public class FileUpload implements IFileUpload {

    public byte[] get1File(String name) {
        byte[] data = null;

        FileReader fr = new FileReader();
        try {
            data = fr.readBinFilePath(name);
        } catch (Exception e) {
            e.printStackTrace();
        }

        return data;
    }

    public String getXmlFile(String name) {
        String data = null;
        FileReader fr = new FileReader();
        try {
            data = fr.readTextFile(name);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return data;
    }

    public byte[][] getMultipleFiles(String[] fnames) {
        byte[][] data = new byte[fnames.length][];
        FileReader fr = new FileReader();
        for (int i = 0; i < fnames.length; i++) {
            try {
                data[i] = fr.readBinFilePath(fnames[i]);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        return data;
    }

}

package com.mp.ws;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

/**
*
* @author nitinaggarwal
*
*/
public class FileReader {

    /** Reads a text (e.g. XML) file from the given path and returns its contents as a String. */
    public String readTextFile(final String name) throws Exception {
        StringBuffer xmlFromFile = new StringBuffer();
        // FileInputStream throws FileNotFoundException itself if the file is missing
        InputStream instr = new FileInputStream(name);
        InputStreamReader streamreader = null;

        try {
            streamreader = new InputStreamReader(instr);
            int x = streamreader.read();
            while (x != -1) {
                xmlFromFile.append((char) x);
                x = streamreader.read();
            }
        } catch (Exception e) {
            System.out.println("Exception " + e.getMessage());
            throw e;
        } finally {
            if (streamreader != null)
                streamreader.close(); // also closes the underlying stream
            else
                instr.close();
        }

        return xmlFromFile.toString();
    }

    /** Reads a binary file that is located via the classpath. */
    public byte[] readBinFileFromClassPath(final String name) throws Exception {
        byte[] bytearray = null;
        FileInputStream fileinputstream = null;
        try {
            fileinputstream = new FileInputStream(getFilePath(name));
            // available() is acceptable here because the source is a local file
            int numberBytes = fileinputstream.available();
            bytearray = new byte[numberBytes];
            readFully(fileinputstream, bytearray);
        } catch (Exception e) {
            System.out.println("Exception " + e.getMessage());
            throw e;
        } finally {
            if (fileinputstream != null)
                fileinputstream.close();
        }
        return bytearray;
    }

    /** Reads a binary file from the given file system path. */
    public byte[] readBinFilePath(final String name) throws Exception {
        byte[] bytearray = null;
        FileInputStream fileinputstream = null;
        try {
            fileinputstream = new FileInputStream(name);
            int numberBytes = fileinputstream.available();
            bytearray = new byte[numberBytes];
            readFully(fileinputstream, bytearray);
        } catch (Exception e) {
            System.out.println("Exception " + e.getMessage());
            throw e;
        } finally {
            if (fileinputstream != null)
                fileinputstream.close();
        }
        return bytearray;
    }

    /** Writes the given bytes to the given file system path. */
    public void writeBinFileToPath(String name, byte[] data) throws IOException {
        FileOutputStream fileoutputstream = new FileOutputStream(name);
        try {
            fileoutputstream.write(data);
        } catch (IOException e) {
            System.out.println(e.getMessage());
        } finally {
            fileoutputstream.close();
        }
    }

    // Keeps reading until the buffer is full or the end of the stream is reached;
    // a single read() call is not guaranteed to fill the whole array.
    private void readFully(InputStream in, byte[] buffer) throws IOException {
        int offset = 0;
        while (offset < buffer.length) {
            int count = in.read(buffer, offset, buffer.length - offset);
            if (count < 0)
                break;
            offset += count;
        }
    }

    // Alternative: obtain the file as a classpath resource stream
    private InputStream getFilePath2(String filename) {
        return this.getClass().getClassLoader().getResourceAsStream(filename);
    }

    private String getFilePath(String filename) throws FileNotFoundException {
        java.net.URL url = this.getClass().getClassLoader().getResource(filename);
        if (url == null)
            throw new FileNotFoundException(filename);
        return url.getPath();
    }

}

package com.mp.ws;

/**
*
* @author nitinaggarwal
*
*/
@javax.jws.WebService(targetNamespace = "http://ws.mp.com/", serviceName = "FileUploadService", portName = "FileUploadPort")
public class FileUploadDelegate {

    com.mp.ws.FileUpload fileUpload = new com.mp.ws.FileUpload();

    public byte[] get1File(String name) {
        return fileUpload.get1File(name);
    }

    public byte[][] getMultipleFiles(String[] fnames) {
        return fileUpload.getMultipleFiles(fnames);
    }

    public String getXmlFile(String name) {
        return fileUpload.getXmlFile(name);
    }

}

package com.mp.ws;
/**
*
* @author nitinaggarwal
*
*/
public class FileUploadTester {

    public static void main(String[] args) {
        FileUpload fu = new FileUpload();
        String[] fnames = { "c:/uploadme.doc", "c:/uploadme.doc", "c:/uploadme.doc" };

        byte[][] data = fu.getMultipleFiles(fnames);

        // Print how many bytes were read for each file
        for (int i = 0; i < data.length; i++) {
            System.out.println(fnames[i] + ": " + (data[i] == null ? 0 : data[i].length) + " bytes");
        }
    }
}

Tuesday, January 27, 2009

Pagination: Server Side or Client Side?

What is the best way to handle pagination, sorting and filtering: on the server side or on the client side?

Numerous times in your projects you may face a situation where you need to pull chunks of data dynamically. The obvious issue you then face is pagination, sorting and filtering, and you start to wonder whether it would be better to handle it all on the server side or to hold back and handle it on the client side.

Well, there is no clear winner between the two; nor is there a right or wrong approach.

The right answer depends on your priorities and the size of the data set to be paginated.

If you have a large number of pages, doing it on the client side makes the user download all of the data up front, much of which may never be needed, and defeats the primary benefit of pagination. In such a scenario you are better off requesting pages in chunks from the server via AJAX, so let the server do the pagination. You can also pre-fetch the next few pages the user is likely to view to make the interface feel more responsive. When implementing it, make sure you optimize your SQL properly; for instance, I believe that in MySQL, if you use the LIMIT option the index may not be used, so you need to rewrite your SQL to use the index properly.
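
To make the server-side option concrete, here is a minimal JDBC sketch of fetching one page with LIMIT/OFFSET (MySQL-style syntax); the table, column and class names are illustrative, not taken from any particular project:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class PageDao {

    // Fetches one page of rows; pageNumber is 0-based.
    public List<String> findPage(Connection conn, int pageNumber, int pageSize) throws SQLException {
        String sql = "SELECT name FROM item ORDER BY name LIMIT ? OFFSET ?";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setInt(1, pageSize);              // rows per page
            ps.setInt(2, pageNumber * pageSize); // rows to skip
            try (ResultSet rs = ps.executeQuery()) {
                List<String> page = new ArrayList<String>();
                while (rs.next()) {
                    page.add(rs.getString("name"));
                }
                return page;
            }
        }
    }
}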

If there are only a few pages, grabbing it all up front and paginating on the client may be a better choice, which gives you the obvious benefit of faster subsequent page loads. Unless really required, server-side pagination should not be chosen in such a case.
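
If the client happens to be a Java application itself (a Swing table, for example), paginating the list that was fetched up front is just index arithmetic; a minimal sketch, with all names illustrative:

import java.util.Collections;
import java.util.List;

public class InMemoryPager<T> {

    private final List<T> allRows; // the full result set, fetched once up front
    private final int pageSize;

    public InMemoryPager(List<T> allRows, int pageSize) {
        this.allRows = allRows;
        this.pageSize = pageSize;
    }

    // Returns a view of the requested page (0-based); no further server round trips are needed.
    public List<T> page(int pageNumber) {
        int from = pageNumber * pageSize;
        int to = Math.min(from + pageSize, allRows.size());
        return from >= to ? Collections.<T>emptyList() : allRows.subList(from, to);
    }
}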

Server side pagination is better for:

  • Large data set
  • Faster initial page load
  • Accessibility for those not running JavaScript
  • Complex view business logic
  • Resilience to concurrent changes

Client side pagination is better for:

  • Small data set
  • Faster subsequent page loads
  • Full support for sorting and filtering (unless the result set exceeds the maximum size).

To sum up, if you're paginating primarily for cosmetic reasons, it makes more sense to handle it client side; if you're paginating to reduce the initial load time, server side is the obvious choice. Of course, the client side's advantage in subsequent page-load times diminishes if you use Ajax to load subsequent pages.

Click here for more details and comments.

Also refer to http://nitinaggarwal.wordpress.com/

Comments/suggestions are welcome.

Friday, January 23, 2009

Reason for the continuous fall of the Pound.

How bad is this fall in the pound? In a word: hideous.

Measured against a basket of other currencies – the best way in this globalised era to test a currency's strength – the pound has fallen in the past year by around a quarter.

This is more than any previous devaluation in the past century – greater even than in 1931, when, under Ramsay MacDonald, the UK was forced to abandon the gold standard and saw the pound plummet by more than 24 per cent against the dollar. Greater than after Black Wednesday and the abandonment of the Exchange Rate Mechanism; worse than in 1967, when Harold Wilson was forced to make an extraordinary televised statement to the nation claiming that the "pound in your pocket" would not be worth any less after his devaluation.

As anyone who has been overseas recently will know, it has fallen from over $2 against the dollar to under $1.40. This week it touched the lowest level since the Plaza Accord of 1985 – in which year the pound very nearly went to parity against the US currency. Against the euro, the pound has slid from €1.35 to just above €1 in the past year.

In practice this means that anyone travelling to the Continent will find it tough to get anything more than a euro for every pound they want exchanged, after the bureau de change has taken its cut and commission.

For Gordon Brown, who mocked the Conservatives in 1992, it is acutely embarrassing. Back then, he said: "A weak currency arises from a weak economy which in turn is the result of a weak Government." This time he is staying conspicuously quiet about the whole thing.

But why is sterling sliding?

In large part because it reflects Britain's economic prospects. The UK is facing a nasty recession – one that is likely to be as bad as any experienced by the Western world. House prices are falling at the fastest rate since the 1930s, unemployment is on the rise and will soon climb beyond two million, consumer spending is sliding.

In such circumstances, investors are naturally likely to withdraw their money from the UK. On the one hand, they will sell sterling shares and investments since they are likely to fall in value as a result of the recession. On the other, those who invest their cash in the UK will pull it out of the country, since the Bank of England is cutting interest rates as a response to the slump. Any money in sterling in a UK bank account is earning very little interest, so overseas investors calculate they might as well take it elsewhere.

How worried ought we to be?

If the above was all that was happening, not unduly. In a world of floating exchange rates, the falling pound is not merely a symptom of the disease (the recession) but its cure. All else being equal, a weak pound should boost the exports of British companies, since it makes their products cheaper than those of their overseas rivals.

Machinery produced in the north of England is fast becoming cheaper than that produced in eastern Europe. And this goes not just for visible trade – actual physical goods – but for invisible trades such as legal or financial services.

So, although Britain's manufacturing sector has shrunk significantly since the 1980s and 1990s, the comparative value of UK products should nevertheless help boost the economy. The same goes for tourism, which has already picked up significantly as foreigners come to the UK to pick up bargains. London's days as Europe's most expensive city are well behind it.

The problem, however, is that all else is not equal at the moment: the appetite abroad for exports of any type has dried up in a way never before experienced. From Europe to the Americas to Asia, trade has almost entirely seized up as the recession has turned global. And let's not mention financial and legal services – the appetite for which has evaporated.

In the 1990s and the 2000s, successive governments decided to focus the UK's economy on financial services. A decision was taken to put almost all our economic eggs in one basket. Unfortunately, that basket has come crashing to the ground.

So is this now a full-blown sterling crisis?

Until recently, it wasn't a crisis. There are, broadly speaking, two types of devaluation – one benign, the other far less so. The good one is much as described above – a competitive devaluation in the pound which, over time, provides a cure. After the pound fell in 1992, it ushered in years of recovery and then prosperity for the economy.

The bad version is a full-scale crisis – a run on the pound. It is a vote of no-confidence in a country's economic policies, and occurs when investors start pulling their cash out of the UK not because of a temporary period of recession but because they are worried about the direction the economy is taking (over years and decades rather than months).

In the months up until this week it was possible to argue that this represented a competitive devaluation, and would be a boon for exporters. All of that changed on Monday. Following Gordon Brown and Alistair Darling's announcement of a second bail-out package for struggling banks, the pound suffered what can be described as a minor run. Investors took fright that the UK was drawing closer to insolvency, and as a response sold off their stocks of government debt.

It is difficult to overstate the significance of this. Britain's power and prosperity since the earliest days of the Union have been founded on its reputation for being a good risk.

Whereas other countries, such as Argentina and Russia, have occasionally defaulted on their debts, Britain's government has always been among the best borrowers in the world. For the first time in decades this is being questioned.

The rumour around the market this week was that Standard & Poor's, a ratings agency which tells traders what has and does not have the stamp of approval, was set to downgrade Britain's government sovereign debt. The agency has since denied this, but the UK fulfils many of the criteria for such a humiliating decision.

Does it really matter if Britain's creditworthiness comes under question?

Yes – immensely. Britain has a large current account deficit – of about £7.7 billion. This means we, as a nation, spend more money than we generate each year. This is no problem while we can borrow the difference, but that £7.7 billion chunk has to come from overseas investors. Should they stop lending to the UK, Britons would face a sudden, painful jolt and their living standards would fall even faster and more painfully than they are at the moment.

The Government would have to seek assistance from the International Monetary Fund which would, most likely, dole out a baleful dose of economic medicine – higher interest rates, lower government spending and immediate austerity.

Although, in the long run, Britain does need to borrow less and save more, such an adjustment should ideally take place over years, not weeks.

Isn't this all really the fault of the bankers as well as the Government?

Indeed it is. Now that the majority of the banking system is effectively nationalised (and the Government has promised to insure the nastiest debts of the remaining private banks) the taxpayer is effectively standing behind another massive liability. The banking system has about $4.4 trillion of foreign debts, and most analysts predict that around £200 billion of these could default.

What scared investors this week was the sudden realisation that the Government, rather than the banks, will have to pay the bill. The UK, unlike Iceland, does not have the luxury of being able to default on those foreign debts (remember the fracas when Britons faced losing their savings in Icelandic banks?)

Were the UK to do the same as Iceland, the size of Britain's liabilities is such that it would trigger an international panic and financial meltdown worse than when Lehman Brothers collapsed last year.

This all sounds unremittingly gloomy. Is there any solution?

Mainly to hope that the economic medicine served up by the Bank of England and its fellow central banks does the trick. As long as house prices are falling and unemployment is rising, the liabilities of the Government will swell and the pound will remain weak. But when, eventually, the economic backdrop improves, so should the financial outlook, and, eventually, the pound.

However, there is little hope of returning to the heady days of a near-80p euro and a $2 pound. The pound was significantly stronger than it ought to have been over the previous decade. It is probably undervalued now, and if all goes well it should bounce back in the coming years.

However, everything now depends on trust: that trust will return to the beleaguered financial system; that investors will start to trust the Government again and that Britons trust that there will be life after the recession.

Source: www.telegraph.co.uk

Monday, November 24, 2008

Java Console and File Input/Output Cheat Sheet

Console Output
System.out.print("Hello ");
System.out.println("world");
Console Input
BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
String text = in.readLine();
File Output
PrintWriter out = new PrintWriter(new FileWriter("K:\\location\\outputfile.txt"));
out.print("Hello ");
out.println("world");
out.close();
File Input
BufferedReader in = new BufferedReader(new FileReader("K:\\location\\inputfile.txt"));
String text = in.readLine();
in.close();
Converting input data
String text = in.readLine();
int x = Integer.parseInt(text);
double y = Double.parseDouble(text);
Reading until EOF
while ((text = in.readLine()) != null) {
    System.out.println(text);
}
Pulling apart delimited items on a line
String text = "Beggars in Spain*Nancy Kress*1992";
StringTokenizer tokenizer = new StringTokenizer(text,"*");
String title = tokenizer.nextToken();
String author = tokenizer.nextToken();
String year = tokenizer.nextToken();
String letters = "a b c d e f g h i j";
StringTokenizer tokenizer = new StringTokenizer(letters, " ");
String[] allText = new String[10];
int pos = 0;
while (tokenizer.hasMoreTokens())
    allText[pos++] = tokenizer.nextToken();
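
As a side note, String.split can do the same job as StringTokenizer in a single call (a minimal sketch; the "*" has to be escaped because split takes a regular expression):

String[] parts = "Beggars in Spain*Nancy Kress*1992".split("\\*");
// parts[0] = "Beggars in Spain", parts[1] = "Nancy Kress", parts[2] = "1992"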

Thursday, October 30, 2008

Splitting the ALSB Logging

What it does

Normally the AquaLogic Service Bus (ALSB) writes its log entries to the main WebLogic domain log. This sample code splits the ALSB logging into a separate log.

How it Works

A startup class is used to direct ALSB log entries to the new ALSB log. A log filter is used to stop the ALSB entries from also going to the WebLogic domain log.
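
The following is only a generic java.util.logging illustration of that mechanism, not the actual sample code: a dedicated FileHandler receives the ALSB entries, while a Filter keeps them out of the other handler. In the real sample, the server logger and the domain log broadcaster are obtained through WebLogic's own logging API, and the file settings come from ALSBLogStart.properties.

import java.util.logging.ConsoleHandler;
import java.util.logging.FileHandler;
import java.util.logging.Filter;
import java.util.logging.Handler;
import java.util.logging.LogRecord;
import java.util.logging.Logger;

public class AlsbLogSplitSketch {

    public static void main(String[] args) throws Exception {
        Logger serverLogger = Logger.getLogger("server");     // stand-in for the WLS server logger
        Logger alsbLogger = Logger.getLogger("server.alsb");  // stand-in for ALSB's loggers (child of the server logger)

        // Role of the startup class: add a handler that writes ALSB entries to their own rotating file
        FileHandler alsbHandler = new FileHandler("alsb%u.log", 5000000, 10, false);
        alsbLogger.addHandler(alsbHandler);

        // Role of the log filter: stop ALSB entries from also reaching the domain log
        Handler domainHandler = new ConsoleHandler();          // stand-in for the domain log broadcaster
        domainHandler.setFilter(new Filter() {
            public boolean isLoggable(LogRecord record) {
                String logger = record.getLoggerName();
                return logger == null || !logger.startsWith("server.alsb");
            }
        });
        serverLogger.addHandler(domainHandler);

        alsbLogger.info("an ALSB message");     // goes to alsb0.log only
        serverLogger.info("a domain message");  // goes to the domain handler only
    }
}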

How to install and execute

Compile and deployment steps:

1. Review the ALSBLogStart.properties file to make sure the default values are correct.

2. Run the Ant build.xml "jar" task to compile and generate jtvalsbLogger.jar.

3. Put the jtvalsbLogger.jar file in the WebLogic domain lib directory on each server in the cluster:

<wl_home>/../user_projects/domains/<domain>/lib

For example on my laptop I put the jar file in C:/bea92ALSB/user_projects/domains/alsb_cluster/lib

4. Copy the Create_ESB_Logging_Resources.py and Create_ESB_Logging_Resources.properties files to some directory on the WLS server box. Edit the Create_ESB_Logging_Resources.properties file to match the desired settings for the target WLS domain.

5. Make sure the target WLS domain is running. If the target is a cluster, make sure all the managed servers are running as well as the admin server.

6. From a command prompt, run the setDomainEnv script in the <wl_home>/../user_projects/domains/<domain>/bin directory (on Windows it is setDomainEnv.cmd; on Linux it is setDomainEnv.sh). Then change directory to the directory containing your Create_ESB_Logging_Resources.py file.

7. Run the WLST script to configure the WLS domain with the new logging settings:

wlst Create_ESB_Logging_Resources.py

The output should look something like this (depending on whether you are installing to a cluster):

Starting an edit session ...

Started edit session, please be sure to save and activate your

changes once you are done.

creating Log Filter ALSBRejectLogFilter

setting attributes for LogFilter ALSBRejectLogFilter

creating startup class ALSBLogStart

setting attributes for startupClass ALSBLogStart

assigning logFilter ALSBRejectLogFilter to server log AlexAdminServer

assigning logFilter ALSBRejectLogFilter to server log ESB1

assigning logFilter ALSBRejectLogFilter to server log ESB2

Saving all your changes ...

Saved all your changes successfully.

Activating all your changes, this may take a while ...

The edit lock associated with this edit session is released

once the activation is completed.

Activation completed

End of Main


Finished.

8. Restart the WLS domain. As the admin server and each managed server start up you should see something like the following in the console output or server log:

<May 14, 2008 8:41:45 AM EDT> <Notice> <Log Management> <BEA-170027> <The server initialized the domain log broadcaster successfully. Log messages will now be broadcasted to the domain log.>

*** ALSB Log path = servers/AlexAdminServer/logs/

*** ALSB Log fileName = alsb%u.log

*** ALSB Log fileSizeBytes = 5000000

*** ALSB Log fileCount = 10

*** ALSB Log fileAppend = false

*** Added ALSB Log handler to server logger= weblogic.logging.WLLogger@1563a3d

These messages in the log indicate the startup class is loading properly in each wls server.

9. Check the appropriate log directory to make sure each server in the cluster (admin and managed servers) has a new ALSB log file. The location and name of the new log files will match the ALSBLogStart.properties settings, which are echoed to the server log as shown in step 8 above.

Other related stuff

http://forums.bea.com/thread.jspa?threadID=300003205

http://forums.bea.com/thread.jspa?threadID=570001171

Wednesday, August 27, 2008

I have been on several teams where we studiously designed UML diagrams at the beginning of the project. As the project progressed and deadlines approached, the UML diagrams were left somewhere behind, not to be updated for months. When a new developer joined the team, we showcased the old UML diagrams and kept saying, "Oh, we never had time to update them, please see the source code to get an idea. And don't hesitate to ask if you have any doubts." I am sure you have all gone through the same scenario.

However, we don't have to keep making up stories anymore, since this article shows how easy it is to include UML diagrams within your Javadoc and keep them updated with every change in the source code repository. We can do this in a few minutes and in a few simple steps.

Getting started with UmlGraph takes five steps:

  1. Download the source code for UMLGraph.
  2. Download and install Graphviz.
  3. Make changes to your Ant build file.
  4. Run the Ant target.
  5. Add this target to your CI job.

Step 1: Download the source code for UMLGraph from here. Unzip the contents. To compile the Java doclet from the source code, run ant on the build.xml file. Copy the UmlGraph.jar file to your project's library. If there is a version mismatch between the different JDK versions you are using, you get an exception like this:

java.lang.UnsupportedClassVersionError: Bad version number in .class file

Make sure you recompile the UMLGraph source code and copy the library to your project.


Step 2: Download and install Graphviz from here. The dot file needs to be post-processed with Graphviz to produce the actual UML diagram. Running the UmlGraph doclet generates a Graphviz diagram specification that can be automatically processed to create PNG drawings. You can generate other formats with Graphviz as well. If Graphviz isn't installed you will get an exception as shown below:

BUILD FAILED
/Users/meerasubbarao/Development/webservices-samples/build.xml:107:
Execute failed: java.io.IOException: dot: not found
Total time: 269 milliseconds


Step 3: Changes to your build.xml file.
Assuming you already have a working project with an Ant build file, add the following target to your build.xml as shown below:


<target name="javadocs" depends="build" description="generates javadoc and also UML Diagram">
        <mkdir dir="${reports.dir}/javadoc"/>
     <javadoc sourcepath="${src.dir}" packagenames="com.stelligent.*" destdir="${reports.dir}/javadoc"
         classpathref="java.classpath" private="true">
          <doclet name="org.umlgraph.doclet.UmlGraphDoc"
              path="lib/UMLGraph.jar">
          <param name="-attributes" />
          <param name="-operations" />
          <param name="-qualify" />
          <param name="-types" />
          <param name="-visibility" />
          </doclet>
          </javadoc>
         <apply executable="dot" dest="${reports.dir}" parallel="false">
         <arg value="-Tpng"/>
         <arg value="-o"/>
         <targetfile/>
         <srcfile/>
         <fileset dir="${reports.dir}" includes="*.dot"/>
         <mapper type="glob" from="*.dot" to="*.png"/>
         </apply>
    </target>

A number of options control the operation of the UMLGraph class diagram generator. These can be specified as parameters within your build file as shown above.

Details about a few options are:

-output: Specify the output file (default graph.dot).
-d: Specify the output directory (defaults to the current directory).
-qualify: Produce fully-qualified class names.
-horizontal: Lay out the graph in the horizontal direction.
-attributes: Show class attributes (Java fields).
-operations: Show class operations (Java methods).
-constructors: Show a class's constructors.
-visibility: Adorn class elements according to their visibility (private, public, protected, package).
-types: Add type information to attributes and operations.
-enumerations: Show enumerations as separate stereotyped primitive types.
-enumconstants: When showing enumerations, also show the values they can take.
-all: Same as -attributes -operations -visibility -types -enumerations -enumconstants.

Take a look here for more options.
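
For illustration, here is a hypothetical pair of classes (they are not from the article); with -attributes, -operations, -types and -visibility enabled, the generated diagram would show their fields, their methods and the association from Order to Customer:

// Customer.java
public class Customer {
    private String name;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}

// Order.java
public class Order {
    private Customer customer; // rendered as an association to Customer
    private double total;

    public Customer getCustomer() { return customer; }
    public double getTotal() { return total; }
}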

Step 4: Run the Ant target.
Open a command window and run the Ant target ant javadocs; you should see output like the following in your console window:

meera-subbaraos-macbook-9:webservices-samples meerasubbarao$ ant javadocs
Buildfile: build.xml

init:

cleanGenerated:

build:
[javac] Compiling 22 source files to /Users/meerasubbarao/Development/ci-jobs/jobs/PetStore_Nightly/workspace/webservices-samples/classes
[javac] Note: Some input files use unchecked or unsafe operations.
[javac] Note: Recompile with -Xlint:unchecked for details.

javadocs:
[javadoc] Generating Javadoc
[javadoc] Javadoc execution
[javadoc] Loading source files for package com.stelligent.biz.ws...
[javadoc] Loading source files for package com.stelligent.ent.jpa...
[javadoc] Constructing Javadoc information...
[javadoc] UmlGraphDoc version 5.0, running the standard doclet
[javadoc] Standard Doclet version 1.5.0_13
[javadoc] Building tree for all the packages and classes...
[javadoc] Building index for all the packages and classes...
[javadoc] Building index for all classes...
[javadoc] Generating /Users/meerasubbarao/Development/ci-jobs/jobs/PetStore_Nightly/workspace/webservices-samples/reports/javadoc/stylesheet.css...
[javadoc] UmlGraphDoc version 5.0, altering javadocs
[javadoc] Building Package view for package com.stelligent.biz.ws
[javadoc] Building Package view for package com.stelligent.ent.jpa
[javadoc] Building Context view for class com.stelligent.biz.ws.SupplierManagerBean
[javadoc] Building Context view for class com.stelligent.biz.ws.SupplierManager
[javadoc] Building Context view for class com.stelligent.biz.ws.SignonManagerBean
[javadoc] Building Context view for class com.stelligent.biz.ws.SignonManager
.....

BUILD SUCCESSFUL
Total time: 8 seconds
meera-subbaraos-macbook-9:webservices-samples meerasubbarao$

The generated Javadoc is pretty neat, with UML diagrams at the top:


Step 5: Add this target to your CI job.
If you already have a CI server like Hudson up and running, which runs commit builds and nightly builds, adding this new target is a one-step process. In my case, I already had a nightly job running, so I added this Ant target to my default target as shown below:


<target name="all" depends="cleanAndDeployForCoverage, javadocs" />

Next, force a build on the Hudson job, publish the javadocs, and you can see the results on the Hudson dashboard.


The Javadoc embedded with UML diagrams displayed from within the Hudson dashboard:


Now that we have UML diagrams integrated within our build file and our CI job, we can ensure that our code base and the UML diagrams are always in sync. We saw how to include these Ant targets in the commit or nightly builds of our CI jobs, and how to publish these artifacts as part of the post-build process.


Monday, August 18, 2008

Choosing between Routing and Orchestration in an ESB

Topics

ESB,

Orchestration

Tags

Routing,

JBI,

Enterprise Integration Patterns

Introduction

Enterprise Service Buses are genuinely useful solutions that combine an array of tools for solving practical problems in the field of application and service integration. However, they present the same mild inconvenience a toolbox does to its user, who knows that the solution to his problem has to be in the box but for the life of him can't figure out which tool it is!


The goal of this article is to help ESB users choose the right answer according to their needs when confronted with the most complex and diverse of ESB concepts: routing and orchestration. Instead of abstract theorizing, we will ground our reasoning in simple, real-world examples with the OW2 PEtALS JBI-compliant ESB [1], in an attempt to fill the void between low-level routing and global business service orchestration. In other words, we will try to uncover how the different layers of routing and orchestration build up.

From Enterprise Service Bus to the routing problem

ESBs have many fields of application, including implementing information-system-wide Service Oriented Architectures (SOAs). But at the lowest level they all aim to ease application and service integration, that is, letting one application or service call another. This very simple and common endeavour has various additional levels of complexity:

  • "routing", when there is not one but many source services where calls originate from or target services to choose between ;
  • "protocol bridges", when services are exposed on another protocol, belong to other servers or even other information systems ;
  • "transformations", when service messages do not have the same data format – which is rule rather than exception.

Those three concerns (routing, protocols, transformations) have a range of close siblings, but may nonetheless be considered the main ESB concepts. In this article we will focus on the first one and on how it relates to a close sibling: orchestration. As a short introduction, let us say that routing is fundamentally low-level, near or in the ESB core, and relies on technical configuration (such as service deployment descriptors) to make technical decisions about where a message has to be sent. Orchestration can be seen as combining service calls to create higher-level, more useful composite services, but it also often has a definite "business-level" ring, and in that case it is shorthand for implementing business-level processes that combine business-specific services across applications and information systems.

Routing versus orchestration: neither a "one size fits all" nor a "black and white" world

So how are orchestration needs addressed in an ESB? It would seem logical to use an orchestration engine provided with the middleware solution. However, this is far too simple an answer to a complex question. Let us consider the following example.

Displaying a list of items

The "ItemManager" application is designed to manage items through operations like creation, update, deletion. This application is connected to an "ItemManagementListener" service, that publishes notifications when an item is updated.

Another application, the "HammerMonitor" application, is a monitoring tool that displays information on item updates that are specifically about hammers. This application exposes a "HammerMonitor" service with a "display" operation that receives these notifications.

Both services are exposed on an ESB. What we want is to let the HammerMonitor display hammers that are known to the ItemManagement application.

In order to connect the ItemManagementService to the HammerMonitorService, we need to configure the ESB connectors (aka "binding components"). One connector is linked to the ItemManager application, the other one is linked to the HammerMonitor application.

Moreover the connector linked to the HammerMonitor application is configured to expose, inside the ESB, an endpoint whose name can be "hammerMonitorService". Thus, a simple way to achieve our goal is to configure the connector linked to the ItemManager application so that it calls, inside the ESB, the endpoint "hammerMonitorService" whenever it receives a message from the ItemManager application.


However, as often happens in the real world, let us say both services have different data formats. This is not a barrier to SOA, as SOA defines a loosely coupled architecture (i.e. it is not mandatory for a service consumer to fit the service provider's definition).

The ItemManagement application provides the following message to the ItemManagementListenerService:

<items>
    <item type="Hammer" name="hammer1"/>
</items>

And the HammerMonitorService has a "display" operation using the following format:

<hammers>
    <hammer hammerName="hammer1"/>
</hammers>

At this point, a mere call is no longer enough to link the two services. Data provided by the ItemManagement application needs to be transformed first. This is actually a very simple, local need for orchestration that has nothing to do with the business level.

A first way to address this would be to use a common, well-known orchestration solution such as a full-blown, externally deployed, BPEL-supporting orchestration engine [2]. This would work, but in this case it would be akin to using a hammer (pun intended) to crack a nut: either all transformed messages would have to go through a single central, remote orchestration engine, in a manner akin to the obsolete "hub" integration architecture, or an orchestration engine would have to be deployed on each node, which is obviously far too heavy a solution for this simple problem.

So it appears that a single, global, business-level answer to orchestration needs is not enough. What about the "dirty" work that has to be done between the routing and the business level, when the generic routing provided by the bus is not enough and the main concern is not yet to implement business rules or processes by manipulating SOA-managed business services, but merely to combine technical, "behind-the-scenes" services so they "get the work done"?

The bus-level, specific development approach: interceptors

The lowest level answer to technical routing and orchestration needs lies in enhancing the ESB's built-in features.

In the case of our previous example, a direct way to circumvent the problem of data consistency between the application that sends the message and the application that receives it is to add some logic in the connectors (i.e. the binding components of the ESB).

For instance, the binding components provided by the PEtALS ESB can be extended with "interceptors". An interceptor is a piece of Java code that is executed in the "sender" binding component before a message is sent into the bus, or in the "receiver" component, when a message is delivered.

In our example, this code can call an XSL transformation to adapt the ItemManagement message format to the HammerMonitor format.
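
For illustration only, the transformation step itself could look like the following sketch, which uses the standard JAXP API (javax.xml.transform); the stylesheet name and the class are hypothetical, and this is not the PEtALS interceptor API:

import java.io.StringReader;
import java.io.StringWriter;

import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

public class ItemToHammerTransformer {

    // Applies itemToHammer.xsl to the incoming <items> payload and returns the <hammers> result.
    public String transform(String itemsXml) throws Exception {
        Transformer transformer = TransformerFactory.newInstance()
                .newTransformer(new StreamSource("itemToHammer.xsl"));
        StringWriter out = new StringWriter();
        transformer.transform(new StreamSource(new StringReader(itemsXml)), new StreamResult(out));
        return out.toString();
    }
}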

 
 


Nevertheless, this approach is very restrictive and not extensible. If the XSL transformation is performed in the "receiver" connector (linked to the HammerMonitor), it assumes that all messages received have the ItemManagement XML structure. If a message comes from another application, it can have a different structure, and in that case the XSL transformation may fail.

The interceptor could check the incoming message structure and choose one XSL transformation or another depending on the message, but it would still remain tightly coupled to the sender. This approach does not respect the loose-coupling principle of SOA. Moreover, any need other than transformation would require developing another set of specific features within the ESB engine, which cannot be expected from ESB users, nor should it be.

The component ("building block") oriented approach: the EIP toolset

ESBs offer integration facilities by providing integration components. These components can perform a range of small, useful, flexible operations between a consumer and a service provider. They typically implement several Enterprise Integration Patterns (made well known by Gregor Hohpe [3]) and are the Swiss Army knife of ESB users.

Independent of the service descriptions (WSDL and others), these EIP components just perform small tasks. The best known are:

  • The "pipe" pattern: a single event triggers a sequence of processing steps, each performing a specific function. The EIP Component sequences the calls.
  • The "content based router" pattern: the EIP Component examines the message content and routes the message onto a different channel, based on data contained in the message.
  • The "message dispatcher" pattern: the EIP Component sends the message to a list of service providers (multipoint)
  • The "scatter gather" pattern: the EIP Component routes a request message to a number of service providers. It then aggregates all the responses into a single response message

The knowledge of all EIP Component operations allows the developer to combine business applications (consumers and service providers) with several "integration pattern bricks". The final result is a composite integration. Each brick of the integration is a service.

Of course, in order to design this composite integration, a dedicated graphical IDE is paramount since it brings, in addition to ease of use, a centralized view of the configuration of all the bricks. For instance, the following samples are designed by the PEtALS ESB integration tool.

The pipeline

The pipeline pattern is used to "pipe" an incoming message through several services. The message is sent to the first one, its response is sent to the second one, whose response is itself sent to the third one, and so on.

Adaptation between a consumer and a service provider

The ItemManagement use-case that we described previously can be designed with this kind of assembly, with a transformation component and a "pipe" brick.


Management of service version evolutions

The same behaviour can be used to manage service version evolution in the following way. A consumer always sends the same message structure to the "pipe" brick, which acts as a proxy for the real service. When the service signature changes, the "pipe" brick first sends the consumer's message to an XSL transformation (to adapt it to the new service format) and then sends the result to the new version of the service. Nothing changes for the consumer.

Content based routing

We've seen how to compose several services into a single one. But the dynamic process aspect is not solved. Here again comes the routing challenge: how to call one service among many?

How do we switch a call between many services? The router brick can perform some tests on the message to direct the request to one service or the other.

For instance, the ItemManagementListener can send notifications for hammer and saw items to a "content based routing" Component. This component tests the name of the item in the message, and sends it to the correct monitoring services (HammerMonitorService or SawMonitorService). As each service defines a different format, two different transformations have to be performed before sending the message to the correct service. So we compose the "routing" brick with "pipe" and "transformation" bricks.


Dispatcher

Another integration need could be to send a request to several services (multi-point communication). For example, when an item order is sent from a front-end application to the ordering system, an email can also be sent to the customer for confirmation: the message is sent both to an ordering service and to an SMTP service.

We can imagine that the ItemManagementListener service, which sends notifications from the ItemManagement application, has to publish the notifications to the HammerMonitor, to the SawMonitor and to a global monitoring tool (which receives all notifications).

A "dispatcher" integration brick can be added to the previous composite integration to send the message to the "routing" brick and to the global monitoring service.


The DSL-based approach: the light orchestrator

Where patterns end, the light orchestrator starts

Enterprise Integration Patterns are great concepts that help in architecting routing and orchestration solutions, and the EIP component is a great tool for actually designing solutions to those problems. However, in complex integration cases the composite assembly approach easily leads to scattered and over-designed configurations. Moreover, like all patterns, EI Patterns are limited in number, while the real world is full of unexpected cases that call for a more flexible solution.

The answer is to use a light orchestration-specialized DSL (Domain Specific Language), which is what the "light orchestrator" or "Enterprise Integration Orchestration" component provides in PEtALS.

When is it the right time to use such a component? It depends on a lot of things, including development practices, but here are a few hints:

  • When, as we've just said, it is hard to envision a solution using only straight, "by the book" patterns,
  • When "routing" and multiplexing patterns such as the one previously described become commonplace (this might also hint at using a rules engine component),
  • When there are many layers of embedded "bricks" in an EIP-based system,
  • When an orchestration subsystem is best understood and maintained when being solved in one single place rather than scattered across several, albeit simple, EIP "bricks"
  • When there is a need for rarer EI Patterns that are not supported by the EIP component (fully dynamic routing, Return Address, Content Enricher, Normalizer…)

EIOrchestration use case: complex dynamic routing

In order to showcase the EIOrchestration component, let's focus on our system's extensibility.

We've already seen how to add a saw-specific monitoring feature to a system that was initially only able to handle hammers. We could add other tool-specific abilities in the same way; however, this would require reconfiguring the assembly each time we want to add another tool type. So what if we want the people using our bus to be able to add their own tool types and specific monitoring abilities?

Example: our customer wants to be able to dynamically add a ScrewdriverMonitorService for tools of type Screwdriver, a DrillerMonitorService for Drillers, and so on.

We could tell them to mention within each message the name of the tool-specific monitor service it must be sent to, and add dynamic routing capabilities to our system.

Example: We enhance the ItemManagement application so it provides the following message body to the ItemManagementListenerService:

<items>
    <item type="Screwdriver" name="screwdriver1"
          customMonitorService="ScrewdriverMonitorService"/>
</items>

where customMonitorService is an additional data field that may be provided by the customer through the ItemManagement application.

In an ESB, routing such a message can be done by dynamically choosing its recipient service according to the "customMonitorService" attribute. For example, this can be done in PEtALS using the EI Orchestration component, using its "get-calls-by-xpath" feature:

<eip:get-calls-by-xpath base="/items/item" service="@customMonitorService"
operation="'display'"/>

Which, in our example, will call the ScrewdriverMonitorService with the previous message.

A complete EIOrchestration sample for PEtALS

We said at the beginning that the PEtALS EIOrchestration component handles process complexity well. So here is an example that gathers in a single configuration everything we've seen in this article: piping (the "eip:chain" element) and transformations, simple content-based routing (the "eip:choose" element) and finally dynamic routing (the "eip:get-calls-by-xpath" element), while still being quite readable:

<eip:eip>
    <eip:chain>
        <eip:choose>
            <eip:when test="/items/item[0]/@type = 'Hammer'">
                <eip:call service="ItemToHammerService" operation="transform"/>
                <eip:call service="HammerMonitorService" operation="display"/>
            </eip:when>
            <eip:when test="/items/item[0]/@type = 'Saw'">
                <eip:call service="ItemToSawService" operation="transform"/>
                <eip:call service="SawMonitorService" operation="display"/>
            </eip:when>
            <eip:otherwise>
                <eip:get-calls-by-xpath base="/items/item"
                    service="@customMonitorService" operation="'display'"/>
            </eip:otherwise>
        </eip:choose>
    </eip:chain>
</eip:eip>


Bridging up with Business Process Management concepts

And what about full-fledged, business-level orchestration?

Another way of thinking about integration is the top-down approach, where enterprise business processes are defined first. In this approach, business processes drive the definition of business services. Thus, a bridge is needed between the services offered by existing applications and what the business process wants to orchestrate. Such a bridge is manifested in the set of all managed business-level services within the enterprise information system, i.e. its SOA (Service Oriented Architecture), which acts as a protective layer both for lower-level, technical services on the bus and for the actual business processes.

The standard way of executing processes in the SOA world is to use a BPEL engine [2]. It can invoke several services and apply business logic to the flow and to XML documents, while also being able to handle data-mapping issues. In this approach, business service definitions are the key to the orchestration: no BPEL orchestration can be done without the definition (typically WSDL) of all services, which ensures cleaner (though costlier) service composition.

An overview of the orchestration setup, when using BPEL in an ESB, is available in the article written by Adrien LOUIS, "build an SOA application from existing services" [4].

Human intervention in business processes: workflows

Now what if, in our tool monitoring example, we needed a supervisor's approval before actually displaying information in the monitoring applications? It would require a manual intervention by a dedicated operator. This is another face of Business Process Management: workflows, which are business processes allowing the involvement of manual, human operations, either for manual business tasks or manual supervision, through a graphical user interface that may be provided within a business portal or a more technical administration interface.

A key point is that workflows follow a state-based paradigm rather than a flow-based one like BPEL orchestrators, making them better adapted to long-lived processes, without preventing them from sitting on top of orchestrated services. Hence workflow servers are usefully complemented by "straight" orchestrators, though that means deploying two business-process-oriented servers, a constraint addressed by interesting new initiatives like JBoss and Bull's "Process Virtual Machine" and the Eclipse Java Workflow Tooling project [5].

Conclusion

We have seen in this article several ways to connect business services with each other, ranging from low-level ones like customized routing to high-level ones using business-oriented approaches like workflow and orchestration. Most importantly, we have shown how ESB integrators have very common middle-level needs for composing local, technical services, and how a range of "glue", "Swiss Army knife"-like features allows them to simply get the job done.

In summary:

  • For a range of simple integration scenarios like the connection between two heterogeneous applications, customizing routing through ESB-specific features, e.g. adapting message data format by adding an XSL transformation in the connectors linked to the application, is actually the easiest way (the interceptor approach).
  • When a strategy is needed to send the message to the right receiver and when operations on messages have to be chained, we can assemble simple, pattern-oriented integration bricks, typically to perform static routing chained with transformations (the EIP approach).
  • In order to solve complex routing strategies involving dynamic routing or complex nesting, a light orchestration component can be used to centralize the routing logic (the LightOrchestrator approach).

At a global, business level, well-managed, consistently defined, business-oriented services are worth the effort of being composed using orchestration such as WSDL-based BPEL, and of being made to interact with people through workflow solutions.