Execute Hadoop SQL Query

This operation executes a given Hadoop SQL query.

S3 Compatible REST API

The following code is used to execute a Hadoop SQL query.

java.net.HttpURLConnection is used to create a POST request, with "hadoopsql" appended to the server URL.

Two samples are shown here:

executeHadoopSQL

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.http.HttpStatus;

public class QueryOperations {

	/*
	 * This method executes a given Hadoop SQL query.
	 * @param serverURL: URL of the S3-compatible server.
	 * @param token: authorization token.
	 * @param sqlQuery: standard SQL query to execute.
	 */
	public void executeHadoopSQL(String serverURL, String token, String sqlQuery)
			throws Exception {
		URL url = null;
		HttpURLConnection httpCon = null;
		InputStream is = null;
		try {
			/* append "/" at end of serverURL */
			if (!serverURL.endsWith("/")) {
				serverURL = serverURL + "/";
			}
			url = new URL(serverURL + "hadoopsql");	//append "hadoopsql" to the serverURL and create a new URL object

			//Returns a URLConnection object that represents a connection to the remote object referred to by the URL.
			httpCon = (HttpURLConnection) url.openConnection();

			httpCon.setDoOutput(true);	// to use the URL connection for output	
			httpCon.setRequestMethod("POST");	//POST request is used

			httpCon.setRequestProperty("authorization", token);	//provides authorization token for authentication
			
			
			
			httpCon.setRequestProperty("sqlquery", sqlQuery);	//provide SQL Query
			httpCon.setRequestProperty("startindex", "5");	//set start index to read JSON of query
			httpCon.setRequestProperty("maxresults", "7");	//set maximum number of Hadoop SQL Queries that wil be fetched
			httpCon.setRequestProperty("timeoutms", "300");	//set timeout period to fetch a Hadoop SQL Query
			
			httpCon.connect();	//Opens a communications link to the resource referenced by the URL

			if (httpCon.getResponseCode() == HttpStatus.SC_OK) {	//check for OK response
				is = httpCon.getInputStream();
				// Process response here
			}
		} finally {
			try {
				if (is != null)
					is.close();	//close InputStream
			} catch (IOException e) {
				e.printStackTrace();
			}
			if (httpCon != null) {
				httpCon.disconnect();	//close connection
			}
		}
	}
}
	
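A minimal usage sketch follows, assuming a hypothetical server URL, authorization token, and SQL query (replace these placeholder values with your own):

public class ExecuteHadoopSQLDemo {

	public static void main(String[] args) throws Exception {
		String serverURL = "http://localhost:8080/queryio/";	//hypothetical server URL
		String token = "<authorization-token>";	//hypothetical authorization token
		String sqlQuery = "SELECT * FROM DefaultTable LIMIT 10";	//hypothetical SQL query

		new QueryOperations().executeHadoopSQL(serverURL, token, sqlQuery);	//execute the query
	}
}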

getHadoopSQLReport

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.commons.io.IOUtils;
import org.apache.http.HttpStatus;

public class QueryOperations {

	/*
	 * This method fetches the report for a given Hadoop SQL query.
	 * @param serverURL: URL of the S3-compatible server.
	 * @param token: authorization token.
	 * @param queryId: id of the Hadoop SQL query whose report will be generated.
	 */
	public static void getHadoopSQLReport(String serverURL, String token, String queryId)
			throws Exception {
		
		URL url = null;
		HttpURLConnection httpCon = null;
		InputStream is = null;
		OutputStream os = null;
		try {
			/* append "/" at end of serverURL */
			if (!serverURL.endsWith("/")) {
				serverURL = serverURL + "/";
			}
			url = new URL(serverURL + "hadoopsql");	//append "hadoopsql" to the serverURL and create a new URL object

			//Returns a URLConnection object that represents a connection to the remote object referred to by the URL.
			httpCon = (HttpURLConnection) url.openConnection();

			httpCon.setDoOutput(true);	// to use the URL connection for output	
			httpCon.setRequestMethod("POST");	//POST request is used

			httpCon.setRequestProperty("authorization", token);	//provides authorization token for authentication
			
			httpCon.setRequestProperty("queryid", queryId);	//Id of Hadoop SQL Query to execute.
			httpCon.setRequestProperty("hadoopsql-report-format", "PDF");	//generate report in PDF format
			
			httpCon.connect();	//Opens a communications link to the resource referenced by the URL
			
			if (httpCon.getResponseCode() == HttpStatus.SC_OK) {	//check for OK response
				
				is = httpCon.getInputStream();
				// Save the report to the local file system
				os = new FileOutputStream(new File("/Applications/QueryIO/result.pdf"));
				IOUtils.copy(is, os);	//copy the response stream to the output file
				System.out.println("\nReport saved.");
			}
		} finally {
			try {
				if (is != null)
					is.close();	//close InputStream
				if (os != null)
					os.close();
			} catch (IOException e) {
				e.printStackTrace();
			}
			if (httpCon != null) {
				httpCon.disconnect();	//close connection
			}
		}
	}
}
	
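A minimal usage sketch follows, assuming a hypothetical server URL, authorization token, and query id (replace these placeholder values with your own):

public class HadoopSQLReportDemo {

	public static void main(String[] args) throws Exception {
		String serverURL = "http://localhost:8080/queryio/";	//hypothetical server URL
		String token = "<authorization-token>";	//hypothetical authorization token
		String queryId = "<query-id>";	//id of an existing Hadoop SQL query (hypothetical)

		QueryOperations.getHadoopSQLReport(serverURL, token, queryId);	//report is saved as /Applications/QueryIO/result.pdf
	}
}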

