gt (string) | context (string) |
---|---|
"" | "package de.mpii.rdf3x;
import java.io.InputStream;
import java.io.Reader;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Map;
// RDF-3X
// (c) 2009 Thomas Neumann. Web site: http://www.mpi-inf.mpg.de/~neumann/rdf3x
//
// This work is licensed under the Creative Commons
// Attribution-Noncommercial-Share Alike 3.0 Unported License. To view a copy
// of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
// or send a letter to Creative Commons, 171 Second Street, Suite 300,
// San Francisco, California, 94105, USA.
public final class ResultSet implements java.sql.ResultSet
{
// The header
private String[] header;
// The data
private String[][] data;
// The current position
private int row;
// The last column
private int lastCol;
// Constructor
ResultSet(String[] header,String[][] data) {
this.header=header;
this.data=data;
row=-1;
}
// Move absolutely
public boolean absolute(int row) {
if (row>0) {
if (row>(data.length+1))
return false;
this.row=row-1;
return true;
} else {
if ((-row)>data.length)
return false;
this.row=data.length-row;
return true;
}
}
// Move after the last entry
public void afterLast() { row=data.length; }
// Move before the first entry
public void beforeFirst() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Cancel all updates
public void cancelRowUpdates() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Clear all warnings
public void clearWarnings() {}
// Releases resources
public void close() { data=null; }
// Deletes the current row
public void deleteRow() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Find a column
public int findColumn(String columnLabel) throws SQLException {
for (int index=0;index<header.length;index++)
if (header[index].equals(columnLabel))
return index+1;
throw new SQLException();
}
// Go to the first entry
public boolean first() {
row=0;
return row<data.length;
}
// Get an entry as array
public Array getArray(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as array
public Array getArray(String columnLabel) throws SQLException { return getArray(findColumn(columnLabel)); }
// Get an entry as ascii stream
public InputStream getAsciiStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as ascii stream
public InputStream getAsciiStream(String columnLabel) throws SQLException { return getAsciiStream(findColumn(columnLabel)); }
// Get an entry as big decimal
public java.math.BigDecimal getBigDecimal(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
/**
* Get an entry as big decimal
* @deprecated
*/
public java.math.BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as big decimal
public java.math.BigDecimal getBigDecimal(String columnLabel) throws SQLException { return getBigDecimal(findColumn(columnLabel)); }
/**
* Get an entry as big decimal.
* @deprecated
*/
public java.math.BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { return getBigDecimal(findColumn(columnLabel),scale); }
// Get an entry as binary stream
public InputStream getBinaryStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as binary stream
public InputStream getBinaryStream(String columnLabel) throws SQLException { return getBinaryStream(findColumn(columnLabel)); }
// Get an entry as blob
public Blob getBlob(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as blob
public Blob getBlob(String columnLabel) throws SQLException { return getBlob(findColumn(columnLabel)); }
// Get an entry as boolean
public boolean getBoolean(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as boolean
public boolean getBoolean(String columnLabel) throws SQLException { return getBoolean(findColumn(columnLabel)); }
// Get an entry as byte
public byte getByte(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as byte
public byte getByte(String columnLabel) throws SQLException { return getByte(findColumn(columnLabel)); }
// Get an entry as bytes
public byte[] getBytes(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as bytes
public byte[] getBytes(String columnLabel) throws SQLException { return getBytes(findColumn(columnLabel)); }
// Get an entry as character stream
public Reader getCharacterStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as character stream
public Reader getCharacterStream(String columnLabel) throws SQLException { return getCharacterStream(findColumn(columnLabel)); }
// Get an entry as clob
public Clob getClob(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as clob
public Clob getClob(String columnLabel) throws SQLException { return getClob(findColumn(columnLabel)); }
// Get the concurrency setting
public int getConcurrency() { return java.sql.ResultSet.CONCUR_READ_ONLY; }
// Get the cursor name
public String getCursorName() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as date
public Date getDate(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as date
public Date getDate(int columnIndex, java.util.Calendar cal) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as date
public Date getDate(String columnLabel) throws SQLException { return getDate(findColumn(columnLabel)); }
// Get an entry as date
public Date getDate(String columnLabel, java.util.Calendar cal) throws SQLException { return getDate(findColumn(columnLabel),cal); }
// Get an entry as double
public double getDouble(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as double
public double getDouble(String columnLabel) throws SQLException { return getDouble(findColumn(columnLabel)); }
// Get the fetch direction
public int getFetchDirection() { return java.sql.ResultSet.FETCH_FORWARD; }
// Get the fetch size
public int getFetchSize() { return 0; }
// Get an entry as float
public float getFloat(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as float
public float getFloat(String columnLabel) throws SQLException { return getFloat(findColumn(columnLabel)); }
// Get the holdability
public int getHoldability() { return java.sql.ResultSet.CLOSE_CURSORS_AT_COMMIT; }
// Get an entry as int
public int getInt(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as int
public int getInt(String columnLabel) throws SQLException { return getInt(findColumn(columnLabel)); }
// Get an entry as long
public long getLong(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as long
public long getLong(String columnLabel) throws SQLException { return getLong(findColumn(columnLabel)); }
// Get the meta data
public java.sql.ResultSetMetaData getMetaData() { return new ResultSetMetaData(header); }
// Get an entry as stream
public Reader getNCharacterStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as stream
public Reader getNCharacterStream(String columnLabel) throws SQLException { return getNCharacterStream(findColumn(columnLabel)); }
// Get an entry as nclob
public NClob getNClob(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as nclob
public NClob getNClob(String columnLabel) throws SQLException { return getNClob(findColumn(columnLabel)); }
// Get an entry as string
public String getNString(int columnIndex) throws SQLException { return getString(columnIndex); }
// Get an entry as string
public String getNString(String columnLabel) throws SQLException { return getNString(findColumn(columnLabel)); }
// Get an entry
public Object getObject(int columnIndex) throws SQLException { return getString(columnIndex); }
// Get an entry
public Object getObject(int columnIndex, Map<String,Class<?>> map) throws SQLException { return getString(columnIndex); }
// Get an entry
public Object getObject(String columnLabel) throws SQLException { return getObject(findColumn(columnLabel)); }
// Get an entry
public Object getObject(String columnLabel, Map<String,Class<?>> map) throws SQLException { return getObject(findColumn(columnLabel),map); }
// Get an entry as ref
public Ref getRef(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as ref
public Ref getRef(String columnLabel) throws SQLException { return getRef(findColumn(columnLabel)); }
// Get the current row number
public int getRow() { return row+1; }
// Get an entry as rowid
public RowId getRowId(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as rowid
public RowId getRowId(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as short
public short getShort(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as short
public short getShort(String columnLabel) throws SQLException { return getShort(findColumn(columnLabel)); }
// Get an entry as SQL
public SQLXML getSQLXML(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as SQL
public SQLXML getSQLXML(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get the corresponding statement
public Statement getStatement() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as string
public String getString(int columnIndex) throws SQLException {
if ((row>=data.length)||(columnIndex<1)||(columnIndex>data[row].length))
throw new SQLException();
String s=data[row][columnIndex-1];
lastCol=columnIndex;
if ("NULL".equals(s))
return null; else
return s;
}
// Get an entry as string
public String getString(String columnLabel) throws SQLException { return getString(findColumn(columnLabel)); }
// Get an entry as time
public Time getTime(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as time
public Time getTime(int columnIndex, java.util.Calendar cal) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as time
public Time getTime(String columnLabel) throws SQLException { return getTime(findColumn(columnLabel)); }
// Get an entry as tme
public Time getTime(String columnLabel, java.util.Calendar cal) throws SQLException { return getTime(findColumn(columnLabel),cal); }
// Get an entry as timestamp
public Timestamp getTimestamp(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as timestamp
public Timestamp getTimestamp(int columnIndex, java.util.Calendar cal) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as timestamp
public Timestamp getTimestamp(String columnLabel) throws SQLException { return getTimestamp(findColumn(columnLabel)); }
// Get an entry as timestamp
public Timestamp getTimestamp(String columnLabel, java.util.Calendar cal) throws SQLException { return getTimestamp(findColumn(columnLabel),cal); }
// Get the type
public int getType() { return java.sql.ResultSet.TYPE_FORWARD_ONLY; }
/**
* Get an entry as unicode stream
* @deprecated
*/
public InputStream getUnicodeStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
/**
* Get an entry as unicode stream
* @deprecated
*/
public InputStream getUnicodeStream(String columnLabel) throws SQLException { return getUnicodeStream(findColumn(columnLabel)); }
// Get an entry as URL
public java.net.URL getURL(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Get an entry as URL
public java.net.URL getURL(String columnLabel) throws SQLException { return getURL(findColumn(columnLabel)); }
// Get warnings
public SQLWarning getWarnings() { return null; }
// Insert a row
public void insertRow() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// After the last row
public boolean isAfterLast() { return row>=data.length; }
// Before the first row
public boolean isBeforeFirst() { return false; }
// Closed
public boolean isClosed() { return data==null; }
// At first row
public boolean isFirst() { return row==0; }
// At last row
public boolean isLast() { return row==(data.length-1); }
// Go to the last row
public boolean last() {
if (data.length>0) {
row=data.length-1;
return true;
} else return false;
}
// Move the cursor
public void moveToCurrentRow() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Move the cursor
public void moveToInsertRow() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Go to the next row
public boolean next() {
if (row>=data.length)
return false;
++row;
return row<data.length;
}
// Go to the previous row
public boolean previous() {
if (row==0)
return false;
--row;
return true;
}
// Refresh the current tow
public void refreshRow() {}
// Move the cursor relatively
public boolean relative(int rows) {
if (rows>=0) {
if (row+rows>=data.length) {
row=data.length;
return false;
} else {
row+=rows;
return true;
}
} else {
if (row+rows<0) {
row=0;
return true;
} else {
row+=rows;
return true;
}
}
}
// Deleted
public boolean rowDeleted() { return false; }
// Inserted
public boolean rowInserted() { return false; }
// Updated
public boolean rowUpdated() { return false; }
// Fetch direction
public void setFetchDirection(int direction) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Fetch size
public void setFetchSize(int rows) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateArray(int columnIndex, Array x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateArray(String columnLabel, Array x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBigDecimal(int columnIndex, java.math.BigDecimal x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBigDecimal(String columnLabel, java.math.BigDecimal x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Updare
public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(int columnIndex, Blob x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(String columnLabel, Blob x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBoolean(int columnIndex, boolean x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBoolean(String columnLabel, boolean x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateByte(int columnIndex, byte x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateByte(String columnLabel, byte x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBytes(int columnIndex, byte[] x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateBytes(String columnLabel, byte[] x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(int columnIndex, Clob x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(int columnIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(String columnLabel, Clob x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateDate(int columnIndex, Date x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateDate(String columnLabel, Date x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateDouble(int columnIndex, double x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateDouble(String columnLabel, double x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateFloat(int columnIndex, float x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateFloat(String columnLabel, float x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateInt(int columnIndex, int x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateInt(String columnLabel, int x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateLong(int columnIndex, long x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateLong(String columnLabel, long x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(int columnIndex, NClob nClob) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(int columnIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(String columnLabel, NClob nClob) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNString(int columnIndex, String nString) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNString(String columnLabel, String nString) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNull(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateNull(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateObject(int columnIndex, Object x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateObject(String columnLabel, Object x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateRef(int columnIndex, Ref x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateRef(String columnLabel, Ref x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateRow() throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateRowId(int columnIndex, RowId x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateRowId(String columnLabel, RowId x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateShort(int columnIndex, short x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateShort(String columnLabel, short x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateString(int columnIndex, String x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateString(String columnLabel, String x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateTime(int columnIndex, Time x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateTime(String columnLabel, Time x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Update
public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { throw new SQLFeatureNotSupportedException(); }
// Was the last column NULL?
public boolean wasNull() throws SQLException {
return getString(lastCol)==null;
}
// Wrapper?
public boolean isWrapperFor(Class<?> iface) { return false; }
// Unwrap
public <T> T unwrap(Class<T> iface) throws SQLException { throw new SQLException(); }
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
// TODO Auto-generated method stub
return null;
}
public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
// TODO Auto-generated method stub
return null;
}
}
" |
"" | "/*
* Copyright 2011 Greg Haines
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.greghaines.jesque.meta.dao.impl;
import static net.greghaines.jesque.utils.ResqueConstants.FAILED;
import static net.greghaines.jesque.utils.ResqueConstants.QUEUE;
import static net.greghaines.jesque.utils.ResqueConstants.QUEUES;
import static net.greghaines.jesque.utils.ResqueConstants.STAT;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import net.greghaines.jesque.Config;
import net.greghaines.jesque.Job;
import net.greghaines.jesque.JobFailure;
import net.greghaines.jesque.json.ObjectMapperFactory;
import net.greghaines.jesque.meta.dao.FailureDAO;
import net.greghaines.jesque.utils.JesqueUtils;
import net.greghaines.jesque.utils.PoolUtils;
import net.greghaines.jesque.utils.PoolUtils.PoolWork;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.util.Pool;
/**
* Accesses failure information about Jesque/Resque from Redis.
*
* @author Greg Haines
*/
public class FailureDAORedisImpl implements FailureDAO {
    private final Config config;
    private final Pool<Jedis> jedisPool;

    /**
     * Constructor.
     * @param config the Jesque configuration
     * @param jedisPool the connection pool to Redis
     * @throws IllegalArgumentException if either argument is null
     */
    public FailureDAORedisImpl(final Config config, final Pool<Jedis> jedisPool) {
        if (config == null) {
            throw new IllegalArgumentException("config must not be null");
        }
        if (jedisPool == null) {
            throw new IllegalArgumentException("jedisPool must not be null");
        }
        this.config = config;
        this.jedisPool = jedisPool;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public long getCount() {
        return PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, Long>() {
            /**
             * {@inheritDoc}
             */
            @Override
            public Long doWork(final Jedis jedis) throws Exception {
                // The stat counter may not exist yet; treat absence as zero
                final String failedStr = jedis.get(key(STAT, FAILED));
                return (failedStr == null) ? 0L : Long.parseLong(failedStr);
            }
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public long getFailQueueJobCount() {
        return PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, Long>() {
            /**
             * {@inheritDoc}
             */
            @Override
            public Long doWork(final Jedis jedis) throws Exception {
                return jedis.llen(key(FAILED));
            }
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<JobFailure> getFailures(final long offset, final long count) {
        return PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, List<JobFailure>>() {
            /**
             * {@inheritDoc}
             */
            @Override
            public List<JobFailure> doWork(final Jedis jedis) throws Exception {
                final List<String> payloads = jedis.lrange(key(FAILED), offset, offset + count - 1);
                final List<JobFailure> failures = new ArrayList<JobFailure>(payloads.size());
                for (final String payload : payloads) {
                    // Fix: guard against empty strings before charAt(0);
                    // ignore non-JSON payloads
                    if (!payload.isEmpty() && payload.charAt(0) == '{') {
                        failures.add(ObjectMapperFactory.get().readValue(payload, JobFailure.class));
                    }
                }
                return failures;
            }
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void clear() {
        PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, Void>() {
            /**
             * {@inheritDoc}
             */
            @Override
            public Void doWork(final Jedis jedis) throws Exception {
                jedis.del(key(FAILED));
                return null;
            }
        });
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Date requeue(final long index) {
        Date retryDate = null;
        final List<JobFailure> failures = getFailures(index, 1);
        if (!failures.isEmpty()) {
            retryDate = PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, Date>() {
                /**
                 * {@inheritDoc}
                 */
                @Override
                public Date doWork(final Jedis jedis) throws Exception {
                    final Date retriedAt = new Date();
                    final JobFailure failure = failures.get(0);
                    // Record the retry time on the failure entry, then
                    // push the job back onto its original queue
                    failure.setRetriedAt(retriedAt);
                    jedis.lset(key(FAILED), index, ObjectMapperFactory.get().writeValueAsString(failure));
                    enqueue(jedis, failure.getQueue(), failure.getPayload());
                    return retriedAt;
                }
            });
        }
        return retryDate;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void remove(final long index) {
        PoolUtils.doWorkInPoolNicely(this.jedisPool, new PoolWork<Jedis, Void>() {
            /**
             * {@inheritDoc}
             */
            @Override
            public Void doWork(final Jedis jedis) throws Exception {
                // Redis has no remove-by-index for lists; overwrite the
                // element with a unique sentinel, then LREM that sentinel
                final String failedKey = key(FAILED);
                final String randId = UUID.randomUUID().toString();
                jedis.lset(failedKey, index, randId);
                jedis.lrem(failedKey, 1, randId);
                return null;
            }
        });
    }

    /**
     * Pushes a job onto the given queue and registers the queue name.
     *
     * @param jedis the Redis connection to use
     * @param queue the destination queue name
     * @param job the job to enqueue
     * @throws IOException if the job cannot be serialized to JSON
     * @throws IllegalArgumentException if queue is null/empty or job is null
     * @throws IllegalStateException if the job is not valid
     */
    protected void enqueue(final Jedis jedis, final String queue, final Job job) throws IOException {
        if (queue == null || "".equals(queue)) {
            throw new IllegalArgumentException("queue must not be null or empty: " + queue);
        }
        if (job == null) {
            throw new IllegalArgumentException("job must not be null");
        }
        if (!job.isValid()) {
            throw new IllegalStateException("job is not valid: " + job);
        }
        final String msg = ObjectMapperFactory.get().writeValueAsString(job);
        jedis.sadd(key(QUEUES), queue);
        jedis.rpush(key(QUEUE, queue), msg);
    }

    /**
     * Builds a namespaced Redis key with the given arguments.
     *
     * @param parts
     *            the key parts to be joined
     * @return an assembled String key
     */
    private String key(final String... parts) {
        return JesqueUtils.createKey(this.config.getNamespace(), parts);
    }
}
" |
"" | "/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package com.sun.speech.freetts;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.Text;
import com.sun.speech.freetts.audio.AudioPlayer;
import com.sun.speech.freetts.lexicon.Lexicon;
import com.sun.speech.freetts.relp.LPCResult;
import com.sun.speech.freetts.util.BulkTimer;
import com.sun.speech.freetts.util.Utilities;
/**
* Performs text-to-speech using a series of
* <code>UtteranceProcessors</code>. It is the main conduit to the FreeTTS
* speech synthesizer. It can perform TTS on ASCII text,
* a JSML document, an <code>InputStream</code>, or a
* <code>FreeTTSSpeakable</code>, by invoking the method <code>speak</code>.
*
* <p>Before a Voice can perform TTS, it must have a
* <code>Lexicon</code>, from which it gets the vocabulary, and
* an <code>AudioPlayer</code>, to which it sends the synthesized output.
*
* <p><b>Example</b> (using the <code>CMUDiphoneVoice</code>,
* <code>CMULexicon</code> and <code>JavaClipAudioPlayer</code>):
*
* <pre>
* Voice voice = new CMUDiphoneVoice();
*
* // sets the Lexicon
* voice.setLexicon(new CMULexicon());
*
* // sets the AudioPlayer
* voice.setAudioPlayer(new JavaClipAudioPlayer());
*
* // loads the Voice
* voice.allocate();
*
* // start talking
* voice.speak("I can talk forever without getting tired!");
* </pre>
*
*
* <p>A user can override the AudioPlayer to use by defining the
* "com.sun.speech.freetts.voice.defaultAudioPlayer" system property.
* The value of this property must be the name of a class that
* implements the AudioPlayer interface, and which also has a no-arg
* constructor.
*
* @see VoiceManager
* @see VoiceDirectory
*/
public abstract class Voice implements UtteranceProcessor, Dumpable {
/** Logger instance. */
private static final Logger LOGGER =
Logger.getLogger(Voice.class.getName());
/**
* Constant that describes the name of the unit database used by
* this voice.
*/
public final static String DATABASE_NAME = "databaseName";
private List utteranceProcessors;
private Map featureProcessors;
private FeatureSetImpl features;
private boolean metrics = false;
private boolean detailedMetrics = false;
private boolean dumpUtterance = false;
private boolean dumpRelations = false;
private String runTitle = "unnamed run";
private Lexicon lexicon = null;
private AudioPlayer defaultAudioPlayer = null;
private AudioPlayer audioPlayer = null;
private UtteranceProcessor audioOutput;
private OutputQueue outputQueue = null;
private String waveDumpFile = null;
private BulkTimer runTimer = new BulkTimer();
private BulkTimer threadTimer = new BulkTimer();
private boolean externalOutputQueue = false;
private boolean externalAudioPlayer = false;
private float nominalRate = 150; // nominal speaking rate for this voice
private float pitch = 100; // pitch baseline (hertz)
private float range = 10; // pitch range (hertz)
private float pitchShift = 1; // F0 Shift
private float volume = 0.8f; // the volume (range 0 to 1)
private float durationStretch = 1f; // the duration stretch
private boolean loaded = false;
private String name = "default_name";
private Age age = Age.DONT_CARE;
private Gender gender = Gender.DONT_CARE;
private String description = "default description";
private Locale locale = Locale.getDefault();
private String domain = "general";
private String style = "standard";
private String organization = "unknown";
/**
* Prefix for System property names.
*/
public final static String PROP_PREFIX = "com.sun.speech.freetts.voice.";
/**
* Feature name for the silence phone string.
*/
public final static String FEATURE_SILENCE = "silence";
/**
* Feature name for the join type string.
*/
public final static String FEATURE_JOIN_TYPE = "join_type";
/**
* Feature name for the default AudioPlayer class to use.
*/
public final static String DEFAULT_AUDIO_PLAYER =
PROP_PREFIX + "defaultAudioPlayer";
/**
* The default class to use for the DEFAULT_AUDIO_PLAYER.
*/
public final static String DEFAULT_AUDIO_PLAYER_DEFAULT =
"com.sun.speech.freetts.audio.JavaStreamingAudioPlayer";
/**
* Creates a new Voice. Utterances are sent to an
* output queue to be rendered as audio. Utterances are placed
* on the queue by an output thread. This
* queue is usually created via a call to 'createOutputThread,'
* which creates a thread that waits on the queue and sends the
* output to the audio player associated with this voice. If
* the queue is null, the output is rendered in the calling
* thread.
*
* @see #createOutputThread
*/
public Voice() {
/* Make the utteranceProcessors a synchronized list to avoid
 * some threading issues.
 */
utteranceProcessors = Collections.synchronizedList(new ArrayList());
features = new FeatureSetImpl();
featureProcessors = new HashMap();
// Allow system properties to override the built-in prosody defaults.
try {
nominalRate = Float.parseFloat(
Utilities.getProperty(PROP_PREFIX + "speakingRate","150"));
pitch = Float.parseFloat(
Utilities.getProperty(PROP_PREFIX + "pitch","100"));
range = Float.parseFloat(
Utilities.getProperty(PROP_PREFIX + "range","10"));
// NOTE(review): the property default here is "1.0" while the field
// initializer is 0.8f — confirm which default is intended.
volume = Float.parseFloat(
Utilities.getProperty(PROP_PREFIX + "volume","1.0"));
} catch (SecurityException se) {
// can't get properties, just use defaults
}
// Queue and players are created lazily during allocate()/getAudioPlayer().
outputQueue = null;
audioPlayer = null;
defaultAudioPlayer = null;
}
/**
* Creates a new Voice like above, except that it also
* stores the properties of the voice.
* @param name the name of the voice
* @param gender the gender of the voice
* @param age the age of the voice
* @param description a human-readable string providing a
* description that can be displayed for the users.
* @param locale the locale of the voice
* @param domain the domain of this voice. For example,
* @param organization the organization which created the voice
* "general", "time", or
* "weather".
*
* @see #Voice()
*/
public Voice(String name, Gender gender, Age age,
String description, Locale locale, String domain,
String organization) {
this();
setName(name);
setGender(gender);
setAge(age);
setDescription(description);
setLocale(locale);
setDomain(domain);
setOrganization(organization);
}
/**
* Speaks the given text.
*
* @param text the text to speak
*
* @return <code>true</code> if the given text is spoken properly;
* otherwise <code>false</code>
*/
public boolean speak(String text) {
return speak(new FreeTTSSpeakableImpl(text));
}
/**
* Speaks the given document.
*
* @param doc the JSML document to speak
*
* @return <code>true</code> if the given document is spoken properly;
* otherwise <code>false</code>
*/
public boolean speak(Document doc) {
return speak(new FreeTTSSpeakableImpl(doc));
}
/**
* Speaks the input stream.
*
* @param inputStream the inputStream to speak
*
* @return <code>true</code> if the given input stream is spoken properly;
* otherwise <code>false</code>
*/
public boolean speak(InputStream inputStream) {
return speak(new FreeTTSSpeakableImpl(inputStream));
}
/**
* Speak the given queue item. This is a synchronous method that
* does not return until the speakable is completely
* spoken or has been cancelled.
*
* @param speakable the item to speak
*
* @return <code>true</code> if the utterance was spoken properly,
* <code>false</code> otherwise
*/
public boolean speak(FreeTTSSpeakable speakable) {
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("speak(FreeTTSSpeakable) called");
}
boolean ok = true;
boolean posted = false;
// Start the first-sample timer up front so audio latency is measured
// from the moment speech was requested.
getAudioPlayer().startFirstSampleTimer();
// Tokenize lazily into utterances; bail out early if the speakable
// is cancelled/completed while we are still producing utterances.
for (Iterator i = tokenize(speakable);
!speakable.isCompleted() && i.hasNext() ; ) {
try {
Utterance utterance = (Utterance) i.next();
if (utterance != null) {
processUtterance(utterance);
posted = true;
}
} catch (ProcessException pe) {
ok = false;
}
}
// Only block waiting for audio if at least one utterance was posted
// and no processing error occurred.
if (ok && posted) {
runTimer.start("WaitAudio");
ok = speakable.waitCompleted();
runTimer.stop("WaitAudio");
}
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("speak(FreeTTSSpeakable) completed");
}
return ok;
}
/**
* @deprecated As of FreeTTS 1.2, replaced by {@link #allocate}.
*/
public void load() {
allocate();
}
/**
 * Allocate this Voice. It loads the lexicon and the
 * audio output handler, and creates an audio output thread by
 * invoking <code>createOutputThread()</code>, if
 * one is not already created. It then calls the <code>loader()</code>
 * method to load Voice-specific data, which include utterance processors.
 *
 * @throws IllegalStateException if no lexicon has been set for this voice
 * @throws Error if the lexicon, audio output handler, or voice data
 * cannot be loaded
 */
public void allocate() {
    if (isLoaded()) {
        return;
    }
    BulkTimer.LOAD.start();
    // Fail fast with a descriptive error instead of the bare
    // NullPointerException the old code produced when no lexicon was set.
    if (lexicon == null) {
        throw new IllegalStateException(
            "no lexicon set for voice " + getName());
    }
    if (!lexicon.isLoaded()) {
        try {
            lexicon.load();
        } catch (IOException ioe) {
            LOGGER.severe("Can't load voice " + ioe);
            throw new Error(ioe);
        }
    }
    try {
        audioOutput = getAudioOutput();
    } catch (IOException ioe) {
        LOGGER.severe("Can't load audio output handler for voice " + ioe);
        throw new Error(ioe);
    }
    if (outputQueue == null) {
        outputQueue = createOutputThread();
    }
    try {
        loader();
    } catch (IOException ioe) {
        LOGGER.severe("Can't load voice " + ioe);
        throw new Error(ioe);
    }
    BulkTimer.LOAD.stop();
    if (isMetrics()) {
        BulkTimer.LOAD.show("loading " + toString() + " for " +
                getRunTitle());
    }
    setLoaded(true);
}
/**
* Returns true if this voice is loaded.
*
* @return <code>true</code> if the voice is loaded;
* otherwise <code>false</code>
*/
public boolean isLoaded() {
return loaded;
}
/**
* Sets the loaded state
*
* @param loaded the new loaded state
* otherwise <code>false</code>
*/
protected void setLoaded(boolean loaded) {
this.loaded = loaded;
}
/**
* Processes the given Utterance by passing it to each
* UtteranceProcessor managed by this Voice. The
* UtteranceProcessors are called in the order they were added to
* the Voice.
*
* @param u the Utterance to process
*
* @throws ProcessException if an exception occurred while performing
* operations on the Utterance
*/
public void processUtterance(Utterance u) throws ProcessException {
    UtteranceProcessor[] processors;
    if (utteranceProcessors == null) {
        return;
    }
    if (u == null) {
        throw new ProcessException("Utterance is null.");
    }
    runTimer.start("processing");
    // Snapshot the synchronized processor list so concurrent changes
    // cannot affect this utterance mid-flight.
    processors = new UtteranceProcessor[utteranceProcessors.size()];
    processors = (UtteranceProcessor[])
        utteranceProcessors.toArray(processors);
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine("Processing Utterance: " + u.getString("input_text"));
    }
    try {
        // Stop early if the speakable was cancelled during processing.
        for (int i = 0; i < processors.length &&
                !u.getSpeakable().isCompleted(); i++) {
            runProcessor(processors[i], u, runTimer);
        }
        if (!u.getSpeakable().isCompleted()) {
            if (outputQueue == null) {
                // No output thread: render audio synchronously.
                if (LOGGER.isLoggable(Level.FINE)) {
                    LOGGER.fine("To AudioOutput");
                }
                outputUtterance(u, runTimer);
            } else {
                runTimer.start("..post");
                outputQueue.post(u);
                runTimer.stop("..post");
            }
        }
    } catch (ProcessException pe) {
        // Report through the class logger (with stack trace) instead of
        // System.err, consistent with the rest of this class.
        LOGGER.log(Level.SEVERE, "Processing Utterance: " + pe, pe);
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, "Trouble while processing utterance " + e, e);
        u.getSpeakable().cancelled();
    }
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine("Done Processing Utterance: "
                + u.getString("input_text"));
    }
    runTimer.stop("processing");
    if (dumpUtterance) {
        u.dump("Utterance");
    }
    if (dumpRelations) {
        u.dumpRelations("Utterance");
    }
    dumpASCII(u);
}
/**
* Dumps the wave for the given utterance.
*
* @param utterance the utterance of interest
*/
private void dumpASCII(Utterance utterance) {
if (waveDumpFile != null) {
LPCResult lpcResult =
(LPCResult) utterance.getObject("target_lpcres");
try {
if (waveDumpFile.equals("-")) {
lpcResult.dumpASCII();
} else {
lpcResult.dumpASCII(waveDumpFile);
}
} catch (IOException ioe) {
LOGGER.severe("Can't dump file to " + waveDumpFile + " " + ioe);
throw new Error(ioe);
}
}
}
/**
* Creates an output thread that will asynchronously
* output utterances that are generated by this voice (and other
* voices).
*
* @return the queue where utterances should be placed.
*/
public static OutputQueue createOutputThread() {
final OutputQueue queue = new OutputQueue();
// Daemon consumer thread: pends on the queue and renders each
// utterance with the voice that produced it, until the queue yields
// null (its close/termination signal).
Thread t = new Thread() {
public void run() {
Utterance utterance = null;
do {
utterance = queue.pend();
if (utterance != null) {
Voice voice = utterance.getVoice();
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("OUT: "
+ utterance.getString("input_text"));
}
// Use the owning voice's thread timer so metrics are
// attributed per voice, not per queue.
voice.outputUtterance(utterance, voice.threadTimer);
}
} while (utterance != null);
}
};
t.setDaemon(true);
t.start();
return queue;
}
/**
* Sends the given utterance to the audio output processor
* associated with this voice. If the queue item associated with
* this utterance is completed, then this set of utterances has
* been cancelled or otherwise aborted and the utterance should
* not be output.
*
* @param utterance the utterance to be output
* @param timer the timer for gathering performance metrics
*
* @return true if the utterance was output properly; otherwise
* false
*/
private boolean outputUtterance(Utterance utterance, BulkTimer timer) {
boolean ok = true;
FreeTTSSpeakable speakable = utterance.getSpeakable();
// A completed speakable means the batch was cancelled/aborted:
// skip audio output entirely.
if (!speakable.isCompleted()) {
if (utterance.isFirst()) {
// First utterance of the speakable: reset the player and
// notify listeners that speech has started.
getAudioPlayer().reset();
speakable.started();
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine(" --- started ---");
}
}
// log(" utt: " + utterance.getString("input_text"));
try {
// Re-check completion: the speakable may have been cancelled
// between the check above and now.
if (!speakable.isCompleted()) {
runProcessor(audioOutput, utterance, timer);
} else {
ok = false;
}
} catch (ProcessException pe) {
ok = false;
}
if (ok && utterance.isLast()) {
// Last utterance: flush remaining audio and mark completion.
getAudioPlayer().drain();
speakable.completed();
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine(" --- completed ---");
}
} else if (!ok) {
// getAudioPlayer().drain();
speakable.cancelled();
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine(" --- cancelled ---");
}
} else {
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine(" --- not last: " + speakable.getText()
+ " --- ");
}
}
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("Calling speakable.completed() on "
+ speakable.getText());
}
} else {
ok = false;
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("STRANGE: speakable already completed: "
+ speakable.getText());
}
}
return ok;
}
/**
 * Runs the given utterance processor, timing the run under a key
 * derived from the processor's name.
 *
 * @param processor the processor to run. If the processor
 * is null, it is ignored
 * @param utterance the utterance to process
 * @param timer the timer used to record how long the processor runs
 *
 * @throws ProcessException if an exception occurs while processing
 * the utterance
 */
private void runProcessor(UtteranceProcessor processor,
Utterance utterance, BulkTimer timer)
throws ProcessException {
if (processor != null) {
String processorName = ".." + processor.toString();
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine(" Running " + processorName);
}
timer.start(processorName);
processor.processUtterance(utterance);
timer.stop(processorName);
}
}
/**
* Returns the tokenizer associated with this voice.
*
* @return the tokenizer
*/
public abstract Tokenizer getTokenizer();
/**
* Return the list of UtteranceProcessor instances. Applications
* should use this to obtain and modify the contents of the
* UtteranceProcessor list.
*
* @return a List containing UtteranceProcessor instances
*/
public List getUtteranceProcessors() {
return utteranceProcessors;
}
/**
* Returns the feature set associated with this voice.
*
* @return the feature set.
*/
public FeatureSet getFeatures() {
return features;
}
/**
* Starts a batch of utterances. Utterances are sometimes
* batched in groups for timing purposes.
*
* @see #endBatch
*/
public void startBatch() {
runTimer.setVerbose(detailedMetrics);
runTimer.start();
}
/**
* Ends a batch of utterances.
*
* @see #startBatch
*/
public void endBatch() {
runTimer.stop();
if (metrics) {
runTimer.show(getRunTitle() + " run");
threadTimer.show(getRunTitle() + " thread");
getAudioPlayer().showMetrics();
long totalMemory = Runtime.getRuntime().totalMemory();
LOGGER.info
("Memory Use : "
+ (totalMemory - Runtime.getRuntime().freeMemory()) / 1024
+ "k of " + totalMemory / 1024 + "k");
}
}
/**
* Sets the output queue for this voice. If no output queue is set
* for the voice when the voice is loaded, a queue and thread will
* be created when the voice is loaded. If the outputQueue is set
* by an external entity by calling setOutputQueue, the caller is
* responsible for shutting down the output thread. That is, if
* you call 'setOutputQueue' then you are responsible for shutting
* down the output thread on your own. This is necessary since the
* output queue may be shared by a number of voices.
*
* <p>Utterances are placed on the
* queue to be output by an output thread. This queue is
* usually created via a call to 'createOutputThread' which
* creates a thread that waits on the queue and sends the
* output to the audio player associated with this voice. If
* the queue is null, the output is rendered in the calling
* thread.
*
* @param queue the output queue
*/
public void setOutputQueue(OutputQueue queue) {
externalOutputQueue = true;
outputQueue = queue;
}
/**
* Returns the output queue associated with this voice.
*
* @return the output queue associated with this voice
*/
public OutputQueue getOutputQueue() {
return outputQueue;
}
/**
* Loads voice specific data. Subclasses of voice should
* implement this to perform class specific loading.
*/
protected abstract void loader() throws IOException;
/**
* tokenizes the given the queue item.
*
* @return an iterator that will yield a series of utterances
*/
private Iterator tokenize(FreeTTSSpeakable speakable) {
return new FreeTTSSpeakableTokenizer(speakable).iterator();
}
/**
* Converts the document to a string (a placeholder for more
* sophisticated logic to be done).
*
* @param dom the jsml document
*
* @return the document as a string.
*/
private String documentToString(Document dom) {
StringBuffer buf = new StringBuffer();
linearize(dom, buf);
return buf.toString();
}
/**
* Appends the text for this node to the given StringBuffer.
*
* @param n the node to traverse in depth-first order
* @param buf the buffer to append text to
*/
private void linearize(Node n, StringBuffer buf) {
StringBuffer endText = processNode(n, buf);
for (Node child = n.getFirstChild();
child != null;
child = child.getNextSibling()) {
linearize(child, buf);
}
if (endText != null) {
buf.append(endText);
}
}
/**
* Adds text for just this node and returns any text that might
* be needed to undo the effects of this node after it is
* processed.
*
* @param n the node to traverse in depth-first order
* @param buf the buffer to append text to
*
* @return a <code>String</code> containing text to undo the
* effects of the node
*/
protected StringBuffer processNode(Node n, StringBuffer buf) {
StringBuffer endText = null;
int type = n.getNodeType();
switch (type) {
case Node.ATTRIBUTE_NODE:
break;
case Node.DOCUMENT_NODE:
break;
case Node.ELEMENT_NODE:
// endText = processElement((Element) n, buf);
break;
case Node.TEXT_NODE:
buf.append(((Text) n).getData());
break;
// Pass processing instructions (e.g., <?blah?>
// right on to the synthesizer. These types of things
// probably should not be used. Instead the 'engine'
// element is probably the best thing to do.
//
case Node.PROCESSING_INSTRUCTION_NODE:
break;
// The document type had better be JSML.
//
case Node.DOCUMENT_TYPE_NODE:
break;
// I think NOTATION nodes are only DTD's.
//
case Node.NOTATION_NODE:
break;
// Should not get COMMENTS because the JSMLParser
// ignores them.
//
case Node.COMMENT_NODE:
break;
// Should not get CDATA because the JSMLParser is
// coalescing.
//
case Node.CDATA_SECTION_NODE:
break;
// Should not get ENTITY related notes because
// entities are expanded by the JSMLParser
//
case Node.ENTITY_NODE:
case Node.ENTITY_REFERENCE_NODE:
break;
// Should not get DOCUMENT_FRAGMENT nodes because I
// [[[WDW]]] think they are only created via the API's
// and cannot be defined via content.
//
case Node.DOCUMENT_FRAGMENT_NODE:
break;
default:
break;
}
return endText;
}
/**
* Dumps the voice in textual form.
*
* @param output where to send the formatted output
* @param pad the initial padding
* @param title the title to print when dumping out
*/
public void dump(PrintWriter output, int pad, String title) {
Utilities.dump(output, pad, title);
features.dump(output, pad + 4, title + " Features");
dumpProcessors(output, pad + 4, title + " Processors");
}
/**
* Dumps the voice processors.
*
* @param output where to send the formatted output
* @param pad the initial padding
* @param title the title to print when dumping out
*/
public void dumpProcessors(PrintWriter output, int pad, String title) {
UtteranceProcessor[] processors;
if (utteranceProcessors == null) {
return;
}
processors = new UtteranceProcessor[utteranceProcessors.size()];
processors = (UtteranceProcessor[])
utteranceProcessors.toArray(processors);
Utilities.dump(output, pad, title);
for (int i = 0; i < processors.length; i++) {
Utilities.dump(output, pad + 4, processors[i].toString());
}
}
/**
* Returns a language/voice specific Feature Processor.
*
* @param name the name of the processor
*
* @return the processor associated with the name or null if none
* could be found
*/
public FeatureProcessor getFeatureProcessor(String name) {
return (FeatureProcessor) featureProcessors.get(name);
}
/**
* Adds a language/voice specific Feature Processor to the set of
* FeatureProcessors supported by this voice.
*
* @param name the name of the processor
* @param fp the processor
*/
public void addFeatureProcessor(String name, FeatureProcessor fp) {
featureProcessors.put(name, fp);
}
/**
* Gets the state of the metrics mode.
*
* @return true if metrics mode is on
*/
public boolean isMetrics() {
return metrics;
}
/**
* Sets the metrics mode.
*
* @param metrics true if metrics mode should be on
*/
public void setMetrics(boolean metrics) {
this.metrics = metrics;
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("Metrics mode is " + metrics);
}
}
/**
* Gets the state of the detailedMetrics mode.
*
* @return true if detailedMetrics mode is on
*/
public boolean isDetailedMetrics() {
return detailedMetrics;
}
/**
* Sets the state of the detailedMetrics mode.
*
* @param detailedMetrics true if detailedMetrics mode should be on
*/
public void setDetailedMetrics(boolean detailedMetrics) {
this.detailedMetrics = detailedMetrics;
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("DetailedMetrics mode is " + detailedMetrics);
}
}
/**
* Gets the state of the dumpUtterance mode.
*
* @return true if dumpUtterance mode is on
*/
public boolean isDumpUtterance() {
return dumpUtterance;
}
/**
* Sets the state of the dumpUtterance mode.
*
* @param dumpUtterance true if dumpUtterance mode should be on
*/
public void setDumpUtterance(boolean dumpUtterance) {
this.dumpUtterance = dumpUtterance;
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("DumpUtterance mode is " + dumpUtterance);
}
}
/**
* Gets the state of the dumpRelations mode.
*
* @return true if dumpRelations mode is on
*/
public boolean isDumpRelations() {
return dumpRelations;
}
/**
* Sets the state of the dumpRelations mode.
*
* @param dumpRelations true if dumpRelations mode should be on
*/
public void setDumpRelations(boolean dumpRelations) {
this.dumpRelations = dumpRelations;
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("DumpRelations mode is " + dumpRelations);
}
}
/**
* Sets the title for this run.
*
* @param runTitle the title for the run
*/
public void setRunTitle(String runTitle) {
this.runTitle = runTitle;
}
/**
* Gets the title for this run.
*
* @return the title for the run
*/
public String getRunTitle() {
return runTitle;
}
/**
* Given a phoneme and a feature name, returns the feature.
*
* @param phone the phoneme of interest
* @param featureName the name of the feature of interest
*
* @return the feature with the given name
*/
public String getPhoneFeature(String phone, String featureName) {
return null;
}
/**
 * Shuts down the voice processing and releases internally created
 * resources. Audio players and output queues supplied by callers
 * (via setAudioPlayer / setOutputQueue) are left for their owners
 * to close.
 */
public void deallocate() {
    setLoaded(false);
    if (!externalAudioPlayer && audioPlayer != null) {
        audioPlayer.close();
        audioPlayer = null;
    }
    // Null-check fixes an NPE when deallocate() runs before allocate()
    // (the queue is only created during allocation). Nulling the field
    // also lets a later allocate() build a fresh queue instead of
    // reusing a closed one.
    if (!externalOutputQueue && outputQueue != null) {
        outputQueue.close();
        outputQueue = null;
    }
}
/**
* Sets the baseline pitch.
*
* @param hertz the baseline pitch in hertz
*/
public void setPitch(float hertz) {
this.pitch = hertz;
}
/**
* Retreives the baseline pitch.
*
* @return the baseline pitch in hertz
*/
public float getPitch() {
return pitch;
}
/**
* Sets the pitch range.
*
* @param range the range in hertz
*/
public void setPitchRange(float range) {
this.range = range;
}
/**
* Gets the pitch range.
*
* @return the range in hertz
*/
public float getPitchRange() {
return range;
}
/**
* Sets the pitch shift
*
* @param shift the pitch shift (1.0 is no shift)
*/
public void setPitchShift(float shift) {
this.pitchShift = shift;
}
/**
* Gets the pitch shift.
*
* @return the pitch shift
*/
public float getPitchShift() {
return pitchShift;
}
/**
* Sets the duration stretch
*
* @param stretch the duration stretch (1.0 is no stretch)
*/
public void setDurationStretch(float stretch) {
this.durationStretch = stretch;
}
/**
* Gets the duration Stretch
*
* @return the duration stretch
*/
public float getDurationStretch() {
return durationStretch;
}
/**
* Sets the rate of speech.
*
* @param wpm words per minute
*/
public void setRate(float wpm) {
// Out-of-range rates (<= 0 or >= 1000 wpm) are silently ignored,
// leaving the current duration stretch unchanged.
if (wpm > 0 && wpm < 1000) {
setDurationStretch(nominalRate / wpm);
}
}
/**
 * Gets the rate of speech.
 *
 * @return words per minute
 */
public float getRate() {
    // setRate(wpm) stores durationStretch = nominalRate / wpm, so the
    // current rate is recovered as nominalRate / durationStretch. The
    // previous implementation multiplied instead of dividing and thus
    // only reported the true rate when the stretch was exactly 1.
    return nominalRate / durationStretch;
}
/**
* Sets the volume.
*
* @param vol the volume (0 to 1.0)
*/
public void setVolume(float vol) {
volume = vol;
}
/**
* Gets the volume.
*
* @return the volume (0 to 1.0)
*/
public float getVolume() {
return volume;
}
/**
* Gets the lexicon for this voice.
*
* @return the lexicon (or null if there is no lexicon)
*/
public Lexicon getLexicon() {
return lexicon;
}
/**
* Sets the lexicon to be used by this voice.
*
* @param lexicon the lexicon to use
*/
public void setLexicon(Lexicon lexicon) {
this.lexicon = lexicon;
}
/**
* Sets the dumpfile for this voice.
*
* @param waveDumpFile the dumpfile
*/
public void setWaveDumpFile(String waveDumpFile) {
this.waveDumpFile = waveDumpFile;
}
/**
* Gets the dumpfile for this voice.
*
* @return the dumpfile
*/
public String getWaveDumpFile() {
return waveDumpFile;
}
/**
* Sets the audio player associated with this voice. The caller is
* responsible for closing this player.
*
* @param player the audio player
*/
public void setAudioPlayer(AudioPlayer player) {
audioPlayer = player;
externalAudioPlayer = true;
}
/**
* Gets the default audio player for this voice. The return
* value will be non-null only if the DEFAULT_AUDIO_PLAYER
* system property has been set to the name of an AudioPlayer
* class, and that class is able to be instantiated via a
* no arg constructor. getAudioPlayer will automatically set
* the audio player for this voice to the default audio player
* if the audio player has not yet been set.
*
* @see #DEFAULT_AUDIO_PLAYER
* @see #getAudioPlayer
* @return the default AudioPlayer
*/
public AudioPlayer getDefaultAudioPlayer() throws InstantiationException {
    // Cache: create the default player at most once per voice.
    if (defaultAudioPlayer != null) {
        return defaultAudioPlayer;
    }
    String className = Utilities.getProperty(
        DEFAULT_AUDIO_PLAYER, DEFAULT_AUDIO_PLAYER_DEFAULT);
    try {
        Class cls = Class.forName(className);
        defaultAudioPlayer = (AudioPlayer) cls.newInstance();
        return defaultAudioPlayer;
    } catch (ClassNotFoundException e) {
        throw new InstantiationException("Can't find class " + className);
    } catch (IllegalAccessException e) {
        // The old message said "Can't find class", misdiagnosing an
        // access failure as a missing class.
        throw new InstantiationException("Can't access class " + className);
    } catch (ClassCastException e) {
        throw new InstantiationException(className + " cannot be cast "
            + "to AudioPlayer");
    }
}
/**
* Gets the audio player associated with this voice. If the
* audio player has not yet been set, the value will default
* to the return value of getDefaultAudioPlayer.
*
* @see #getDefaultAudioPlayer
* @return the audio player
*/
public AudioPlayer getAudioPlayer() {
    if (audioPlayer == null) {
        try {
            audioPlayer = getDefaultAudioPlayer();
        } catch (InstantiationException e) {
            // Report through the class logger (keeping the stack trace)
            // instead of dumping to stderr via printStackTrace().
            LOGGER.log(Level.SEVERE,
                "Can't instantiate default audio player", e);
        }
    }
    // May still be null if the default player could not be created.
    return audioPlayer;
}
/**
* Get a resource for this voice.
* By default, the voice is searched for in the package
* to which the voice class belongs. Subclasses are free to
* override this behaviour.
*/
protected URL getResource(String resource) {
return this.getClass().getResource(resource);
}
/**
* Set the name of this voice.
* [[[TODO: any standard format to the name?]]]
*
* @param name the name to assign this voice
*/
protected void setName(String name) {
this.name = name;
}
/**
* Get the name of this voice.
*
* @return the name
*/
public String getName() {
return name;
}
/**
* Returns the name of this Voice.
*
* @return the name of this Voice
*/
public String toString() {
return getName();
}
/**
* Set the gender of this voice.
*
* @param gender the gender to assign
*/
protected void setGender(Gender gender) {
this.gender = gender;
}
/**
* Get the gender of this voice.
*
* @return the gender of this voice
*/
public Gender getGender() {
return gender;
}
/**
* Set the age of this voice.
*
* @param age the age to assign
*/
protected void setAge(Age age) {
this.age = age;
}
/**
* Get the age of this voice.
*
* @return the age of this voice
*/
public Age getAge() {
return age;
}
/**
* Set the description of this voice.
*
* @param description the human readable description to assign
*/
protected void setDescription(String description) {
this.description = description;
}
/**
* Get the description of this voice.
*
* @return the human readable description of this voice
*/
public String getDescription() {
return description;
}
/**
* Set the locale of this voice.
*
* @param locale the locale of this voice.
*/
protected void setLocale(Locale locale) {
this.locale = locale;
}
/**
* Get the locale of this voice.
*
* @return the locale of this voice.
*/
public Locale getLocale() {
return locale;
}
/**
* Set the domain of this voice.
*
* @param domain the domain of this voice. For example,
* "general", "time", or
* "weather".
*/
protected void setDomain(String domain) {
this.domain = domain;
}
/**
* Get the domain of this voice.
*
* @return the domain of this voice. For example,
* "general", "time", or
* "weather".
*/
public String getDomain() {
return domain;
}
/**
* Sets the voice style. This parameter is designed for human
* interpretation. Values might include "business", "casual",
* "robotic", "breathy"
*
* @param style the stile of this voice.
*/
public void setStyle(String style) {
this.style = style;
}
/**
* Gets the voice style. This parameter is designed for human
* interpretation. Values might include "business", "casual",
* "robotic", "breathy".
*/
public String getStyle() {
return style;
}
/**
* Sets the organization which created this voice. For example
* "cmu", "sun", ...
*
* @param organization the name of the organization
*/
protected void setOrganization(String organization) {
this.organization = organization;
}
/**
* Gets the organization which created this voice. For example
* "cmu", "sun", ...
*
* @return the name of the organization
*/
public String getOrganization() {
return organization;
}
/**
* Returns the AudioOutput processor to be used by this voice.
* Derived voices typically override this to customize behaviors.
*
* @return the audio output processor
*
* @throws IOException if an IO error occurs while getting
* processor
*/
protected abstract UtteranceProcessor getAudioOutput() throws IOException ;
/**
* Tokenizes a FreeTTSSpeakable
*/
private class FreeTTSSpeakableTokenizer {
FreeTTSSpeakable speakable;
Tokenizer tok = getTokenizer();
/**
* Constructor.
*
* @param speakable the queue item to be pretokenized
*/
public FreeTTSSpeakableTokenizer(FreeTTSSpeakable speakable) {
this.speakable = speakable;
if (speakable.isPlainText()) {
tok.setInputText(speakable.getText());
} else if (speakable.isStream()) {
Reader reader = new BufferedReader(
new InputStreamReader(speakable.getInputStream()));
tok.setInputReader(reader);
} else if (speakable.isDocument()) {
tok.setInputText(documentToString(speakable.getDocument()));
}
}
/**
* Returns an iterator for this text item.
*/
public Iterator iterator() {
return new Iterator() {
boolean first = true;
Token savedToken = null;
/**
* Determines if there are more utterances
*
* @return true if there are more tokens
*/
public boolean hasNext() {
return savedToken != null || tok.hasMoreTokens();
}
/**
* Returns the next utterance.
*
* @return the next utterance (as an object) or
* null if there is are no utterances left
*/
public Object next() {
ArrayList tokenList = new ArrayList();
Utterance utterance = null;
if (savedToken != null) {
tokenList.add(savedToken);
savedToken = null;
}
while (tok.hasMoreTokens()) {
Token token = tok.getNextToken();
if ((token.getWord().length() == 0) ||
(tokenList.size() > 500) ||
tok.isBreak()) {
savedToken = token;
break;
}
tokenList.add(token);
}
utterance = new Utterance(Voice.this, tokenList);
utterance.setSpeakable(speakable);
utterance.setFirst(first);
first = false;
boolean isLast =
(!tok.hasMoreTokens() &&
(savedToken == null ||
savedToken.getWord().length() == 0));
utterance.setLast(isLast);
return utterance;
}
public void remove() {
throw new UnsupportedOperationException("remove");
}
};
}
}
}
" |
"" | "/*
* ====================================================================
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.http.impl.cookie;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.http.annotation.NotThreadSafe;
import org.apache.http.Header;
import org.apache.http.HeaderElement;
import org.apache.http.NameValuePair;
import org.apache.http.cookie.ClientCookie;
import org.apache.http.cookie.Cookie;
import org.apache.http.cookie.CookieAttributeHandler;
import org.apache.http.cookie.CookieOrigin;
import org.apache.http.cookie.CookieSpec;
import org.apache.http.cookie.MalformedCookieException;
import org.apache.http.cookie.SM;
import org.apache.http.message.BufferedHeader;
import org.apache.http.util.CharArrayBuffer;
/**
* RFC 2965 compliant {@link CookieSpec} implementation.
*
* @since 4.0
*/
@NotThreadSafe // superclass is @NotThreadSafe
public class RFC2965Spec extends RFC2109Spec {
/**
* Default constructor
*
*/
public RFC2965Spec() {
this(null, false);
}
public RFC2965Spec(final String[] datepatterns, boolean oneHeader) {
super(datepatterns, oneHeader);
registerAttribHandler(ClientCookie.DOMAIN_ATTR, new RFC2965DomainAttributeHandler());
registerAttribHandler(ClientCookie.PORT_ATTR, new RFC2965PortAttributeHandler());
registerAttribHandler(ClientCookie.COMMENTURL_ATTR, new RFC2965CommentUrlAttributeHandler());
registerAttribHandler(ClientCookie.DISCARD_ATTR, new RFC2965DiscardAttributeHandler());
registerAttribHandler(ClientCookie.VERSION_ATTR, new RFC2965VersionAttributeHandler());
}
@Override
public List<Cookie> parse(
final Header header,
CookieOrigin origin) throws MalformedCookieException {
if (header == null) {
throw new IllegalArgumentException("Header may not be null");
}
if (origin == null) {
throw new IllegalArgumentException("Cookie origin may not be null");
}
if (!header.getName().equalsIgnoreCase(SM.SET_COOKIE2)) {
throw new MalformedCookieException("Unrecognized cookie header '"
+ header.toString() + "'");
}
origin = adjustEffectiveHost(origin);
HeaderElement[] elems = header.getElements();
return createCookies(elems, origin);
}
@Override
protected List<Cookie> parse(
final HeaderElement[] elems,
CookieOrigin origin) throws MalformedCookieException {
origin = adjustEffectiveHost(origin);
return createCookies(elems, origin);
}
private List<Cookie> createCookies(
final HeaderElement[] elems,
final CookieOrigin origin) throws MalformedCookieException {
List<Cookie> cookies = new ArrayList<Cookie>(elems.length);
for (HeaderElement headerelement : elems) {
String name = headerelement.getName();
String value = headerelement.getValue();
if (name == null || name.length() == 0) {
throw new MalformedCookieException("Cookie name may not be empty");
}
BasicClientCookie2 cookie = new BasicClientCookie2(name, value);
cookie.setPath(getDefaultPath(origin));
cookie.setDomain(getDefaultDomain(origin));
cookie.setPorts(new int [] { origin.getPort() });
// cycle through the parameters
NameValuePair[] attribs = headerelement.getParameters();
// Eliminate duplicate attributes. The first occurrence takes precedence
// See RFC2965: 3.2 Origin Server Role
Map<String, NameValuePair> attribmap =
new HashMap<String, NameValuePair>(attribs.length);
for (int j = attribs.length - 1; j >= 0; j--) {
NameValuePair param = attribs[j];
attribmap.put(param.getName().toLowerCase(Locale.ENGLISH), param);
}
for (Map.Entry<String, NameValuePair> entry : attribmap.entrySet()) {
NameValuePair attrib = entry.getValue();
String s = attrib.getName().toLowerCase(Locale.ENGLISH);
cookie.setAttribute(s, attrib.getValue());
CookieAttributeHandler handler = findAttribHandler(s);
if (handler != null) {
handler.parse(cookie, attrib.getValue());
}
}
cookies.add(cookie);
}
return cookies;
}
@Override
public void validate(final Cookie cookie, CookieOrigin origin)
throws MalformedCookieException {
if (cookie == null) {
throw new IllegalArgumentException("Cookie may not be null");
}
if (origin == null) {
throw new IllegalArgumentException("Cookie origin may not be null");
}
origin = adjustEffectiveHost(origin);
super.validate(cookie, origin);
}
@Override
public boolean match(final Cookie cookie, CookieOrigin origin) {
if (cookie == null) {
throw new IllegalArgumentException("Cookie may not be null");
}
if (origin == null) {
throw new IllegalArgumentException("Cookie origin may not be null");
}
origin = adjustEffectiveHost(origin);
return super.match(cookie, origin);
}
/**
* Adds valid Port attribute value, e.g. "8000,8001,8002"
*/
@Override
protected void formatCookieAsVer(final CharArrayBuffer buffer,
final Cookie cookie, int version) {
super.formatCookieAsVer(buffer, cookie, version);
// format port attribute
if (cookie instanceof ClientCookie) {
// Test if the port attribute as set by the origin server is not blank
String s = ((ClientCookie) cookie).getAttribute(ClientCookie.PORT_ATTR);
if (s != null) {
buffer.append("; $Port");
buffer.append("=\"");
if (s.trim().length() > 0) {
int[] ports = cookie.getPorts();
if (ports != null) {
for (int i = 0, len = ports.length; i < len; i++) {
if (i > 0) {
buffer.append(",");
}
buffer.append(Integer.toString(ports[i]));
}
}
}
buffer.append("\"");
}
}
}
/**
* Set 'effective host name' as defined in RFC 2965.
* <p>
* If a host name contains no dots, the effective host name is
* that name with the string .local appended to it. Otherwise
* the effective host name is the same as the host name. Note
* that all effective host names contain at least one dot.
*
* @param origin origin where cookie is received from or being sent to.
*/
private static CookieOrigin adjustEffectiveHost(final CookieOrigin origin) {
String host = origin.getHost();
// Test if the host name appears to be a fully qualified DNS name,
// IPv4 address or IPv6 address
boolean isLocalHost = true;
for (int i = 0; i < host.length(); i++) {
char ch = host.charAt(i);
if (ch == '.' || ch == ':') {
isLocalHost = false;
break;
}
}
if (isLocalHost) {
host += ".local";
return new CookieOrigin(
host,
origin.getPort(),
origin.getPath(),
origin.isSecure());
} else {
return origin;
}
}
@Override
public int getVersion() {
return 1;
}
@Override
public Header getVersionHeader() {
CharArrayBuffer buffer = new CharArrayBuffer(40);
buffer.append(SM.COOKIE2);
buffer.append(": ");
buffer.append("$Version=");
buffer.append(Integer.toString(getVersion()));
return new BufferedHeader(buffer);
}
@Override
public String toString() {
return "rfc2965";
}
}
" |
"" | "/***************************************************************************
* Copyright 2017 Kieker Project (http://kieker-monitoring.net)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************/
package kieker.common.record.misc;
import java.nio.BufferOverflowException;
import kieker.common.record.AbstractMonitoringRecord;
import kieker.common.record.IMonitoringRecord;
import kieker.common.record.io.IValueDeserializer;
import kieker.common.record.io.IValueSerializer;
import kieker.common.util.registry.IRegistry;
/**
* @author Jan Waller
* API compatibility: Kieker 1.13.0
*
* @since 1.7
*/
public class KiekerMetadataRecord extends AbstractMonitoringRecord implements IMonitoringRecord.Factory, IMonitoringRecord.BinaryFactory {
private static final long serialVersionUID = 8241152536143822747L;
/** Descriptive definition of the serialization size of the record. */
public static final int SIZE = TYPE_SIZE_STRING // KiekerMetadataRecord.version
+ TYPE_SIZE_STRING // KiekerMetadataRecord.controllerName
+ TYPE_SIZE_STRING // KiekerMetadataRecord.hostname
+ TYPE_SIZE_INT // KiekerMetadataRecord.experimentId
+ TYPE_SIZE_BOOLEAN // KiekerMetadataRecord.debugMode
+ TYPE_SIZE_LONG // KiekerMetadataRecord.timeOffset
+ TYPE_SIZE_STRING // KiekerMetadataRecord.timeUnit
+ TYPE_SIZE_LONG // KiekerMetadataRecord.numberOfRecords
;
public static final Class<?>[] TYPES = {
String.class, // KiekerMetadataRecord.version
String.class, // KiekerMetadataRecord.controllerName
String.class, // KiekerMetadataRecord.hostname
int.class, // KiekerMetadataRecord.experimentId
boolean.class, // KiekerMetadataRecord.debugMode
long.class, // KiekerMetadataRecord.timeOffset
String.class, // KiekerMetadataRecord.timeUnit
long.class, // KiekerMetadataRecord.numberOfRecords
};
/** user-defined constants. */
public static final String NO_CONTROLLERNAME = "<no-controller-name>";
public static final String NO_HOSTNAME = "<no-hostname>";
public static final String NO_TIMESOURCE = "<no-timesource>";
public static final String NO_TIMEUNIT = "NANOSECONDS";
/** default constants. */
public static final String VERSION = kieker.common.util.Version.getVERSION();
public static final String CONTROLLER_NAME = NO_CONTROLLERNAME;
public static final String HOSTNAME = NO_HOSTNAME;
public static final int EXPERIMENT_ID = 0;
public static final boolean DEBUG_MODE = false;
public static final long TIME_OFFSET = 0L;
public static final String TIME_UNIT = NO_TIMEUNIT;
public static final long NUMBER_OF_RECORDS = 0L;
/** property name array. */
private static final String[] PROPERTY_NAMES = {
"version",
"controllerName",
"hostname",
"experimentId",
"debugMode",
"timeOffset",
"timeUnit",
"numberOfRecords",
};
/** property declarations. */
private final String version;
private final String controllerName;
private final String hostname;
private final int experimentId;
private final boolean debugMode;
private final long timeOffset;
private final String timeUnit;
private final long numberOfRecords;
/**
* Creates a new instance of this class using the given parameters.
*
* @param version
* version
* @param controllerName
* controllerName
* @param hostname
* hostname
* @param experimentId
* experimentId
* @param debugMode
* debugMode
* @param timeOffset
* timeOffset
* @param timeUnit
* timeUnit
* @param numberOfRecords
* numberOfRecords
*/
public KiekerMetadataRecord(final String version, final String controllerName, final String hostname, final int experimentId, final boolean debugMode, final long timeOffset, final String timeUnit, final long numberOfRecords) {
this.version = version == null?VERSION:version;
this.controllerName = controllerName == null?NO_CONTROLLERNAME:controllerName;
this.hostname = hostname == null?NO_HOSTNAME:hostname;
this.experimentId = experimentId;
this.debugMode = debugMode;
this.timeOffset = timeOffset;
this.timeUnit = timeUnit == null?NO_TIMEUNIT:timeUnit;
this.numberOfRecords = numberOfRecords;
}
/**
* This constructor converts the given array into a record.
* It is recommended to use the array which is the result of a call to {@link #toArray()}.
*
* @param values
* The values for the record.
*
* @deprecated since 1.13. Use {@link #KiekerMetadataRecord(IValueDeserializer)} instead.
*/
@Deprecated
public KiekerMetadataRecord(final Object[] values) { // NOPMD (direct store of values)
AbstractMonitoringRecord.checkArray(values, TYPES);
this.version = (String) values[0];
this.controllerName = (String) values[1];
this.hostname = (String) values[2];
this.experimentId = (Integer) values[3];
this.debugMode = (Boolean) values[4];
this.timeOffset = (Long) values[5];
this.timeUnit = (String) values[6];
this.numberOfRecords = (Long) values[7];
}
/**
* This constructor uses the given array to initialize the fields of this record.
*
* @param values
* The values for the record.
* @param valueTypes
* The types of the elements in the first array.
*
* @deprecated since 1.13. Use {@link #KiekerMetadataRecord(IValueDeserializer)} instead.
*/
@Deprecated
protected KiekerMetadataRecord(final Object[] values, final Class<?>[] valueTypes) { // NOPMD (values stored directly)
AbstractMonitoringRecord.checkArray(values, valueTypes);
this.version = (String) values[0];
this.controllerName = (String) values[1];
this.hostname = (String) values[2];
this.experimentId = (Integer) values[3];
this.debugMode = (Boolean) values[4];
this.timeOffset = (Long) values[5];
this.timeUnit = (String) values[6];
this.numberOfRecords = (Long) values[7];
}
/**
* @param deserializer
* The deserializer to use
*/
public KiekerMetadataRecord(final IValueDeserializer deserializer) {
this.version = deserializer.getString();
this.controllerName = deserializer.getString();
this.hostname = deserializer.getString();
this.experimentId = deserializer.getInt();
this.debugMode = deserializer.getBoolean();
this.timeOffset = deserializer.getLong();
this.timeUnit = deserializer.getString();
this.numberOfRecords = deserializer.getLong();
}
/**
* {@inheritDoc}
*
* @deprecated since 1.13. Use {@link #serialize(IValueSerializer)} with an array serializer instead.
*/
@Override
@Deprecated
public Object[] toArray() {
return new Object[] {
this.getVersion(),
this.getControllerName(),
this.getHostname(),
this.getExperimentId(),
this.isDebugMode(),
this.getTimeOffset(),
this.getTimeUnit(),
this.getNumberOfRecords()
};
}
/**
* {@inheritDoc}
*/
@Override
public void registerStrings(final IRegistry<String> stringRegistry) { // NOPMD (generated code)
stringRegistry.get(this.getVersion());
stringRegistry.get(this.getControllerName());
stringRegistry.get(this.getHostname());
stringRegistry.get(this.getTimeUnit());
}
/**
* {@inheritDoc}
*/
@Override
public void serialize(final IValueSerializer serializer) throws BufferOverflowException {
//super.serialize(serializer);
serializer.putString(this.getVersion());
serializer.putString(this.getControllerName());
serializer.putString(this.getHostname());
serializer.putInt(this.getExperimentId());
serializer.putBoolean(this.isDebugMode());
serializer.putLong(this.getTimeOffset());
serializer.putString(this.getTimeUnit());
serializer.putLong(this.getNumberOfRecords());
}
/**
* {@inheritDoc}
*/
@Override
public Class<?>[] getValueTypes() {
return TYPES; // NOPMD
}
/**
* {@inheritDoc}
*/
@Override
public String[] getValueNames() {
return PROPERTY_NAMES; // NOPMD
}
/**
* {@inheritDoc}
*/
@Override
public int getSize() {
return SIZE;
}
/**
* {@inheritDoc}
*
* @deprecated This record uses the {@link kieker.common.record.IMonitoringRecord.Factory} mechanism. Hence, this method is not implemented.
*/
@Override
@Deprecated
public void initFromArray(final Object[] values) {
throw new UnsupportedOperationException();
}
/**
* {@inheritDoc}
*/
@Override
public boolean equals(final Object obj) {
if (obj == null) return false;
if (obj == this) return true;
if (obj.getClass() != this.getClass()) return false;
final KiekerMetadataRecord castedRecord = (KiekerMetadataRecord) obj;
if (this.getLoggingTimestamp() != castedRecord.getLoggingTimestamp()) return false;
if (!this.getVersion().equals(castedRecord.getVersion())) return false;
if (!this.getControllerName().equals(castedRecord.getControllerName())) return false;
if (!this.getHostname().equals(castedRecord.getHostname())) return false;
if (this.getExperimentId() != castedRecord.getExperimentId()) return false;
if (this.isDebugMode() != castedRecord.isDebugMode()) return false;
if (this.getTimeOffset() != castedRecord.getTimeOffset()) return false;
if (!this.getTimeUnit().equals(castedRecord.getTimeUnit())) return false;
if (this.getNumberOfRecords() != castedRecord.getNumberOfRecords()) return false;
return true;
}
public final String getVersion() {
return this.version;
}
public final String getControllerName() {
return this.controllerName;
}
public final String getHostname() {
return this.hostname;
}
public final int getExperimentId() {
return this.experimentId;
}
public final boolean isDebugMode() {
return this.debugMode;
}
public final long getTimeOffset() {
return this.timeOffset;
}
public final String getTimeUnit() {
return this.timeUnit;
}
public final long getNumberOfRecords() {
return this.numberOfRecords;
}
}
" |
"" | "/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import javax.annotation.Nullable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.MultipartUpload;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.SelectObjectContentRequest;
import com.amazonaws.services.s3.model.SelectObjectContentResult;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.amazonaws.services.s3.transfer.model.UploadResult;
import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.s3a.api.RequestFactory;
import org.apache.hadoop.fs.s3a.impl.StoreContext;
import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext;
import org.apache.hadoop.fs.s3a.select.SelectBinding;
import org.apache.hadoop.fs.store.audit.AuditSpan;
import org.apache.hadoop.fs.store.audit.AuditSpanSource;
import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.functional.CallableRaisingIOE;
import static org.apache.hadoop.util.Preconditions.checkNotNull;
import static org.apache.hadoop.fs.s3a.Invoker.*;
import static org.apache.hadoop.fs.store.audit.AuditingFunctions.withinAuditSpan;
/**
* Helper for low-level operations against an S3 Bucket for writing data,
* creating and committing pending writes, and other S3-layer operations.
* <p>
* It hides direct access to the S3 API
* and is a location where the object operations can be evolved/enhanced.
* <p>
* Features
* <ul>
* <li>Methods to create and submit requests to S3, so avoiding
* all direct interaction with the AWS APIs.</li>
* <li>Some extra preflight checks of arguments, so failing fast on
* errors.</li>
* <li>Callbacks to let the FS know of events in the output stream
* upload process.</li>
* <li>Other low-level access to S3 functions, for private use.</li>
* <li>Failure handling, including converting exceptions to IOEs.</li>
* <li>Integration with instrumentation.</li>
* <li>Evolution to add more low-level operations, such as S3 select.</li>
* </ul>
*
* This API is for internal use only.
* Span scoping: This helper is instantiated with span; it will be used
* before operations which query/update S3
*
* History
* <pre>
* - A nested class in S3AFileSystem
* - Single shared instance created and reused.
* - [HADOOP-13786] A separate class, single instance in S3AFS
* - [HDFS-13934] Split into interface and implementation
* - [HADOOP-15711] Adds audit tracking; one instance per use.
* </pre>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class WriteOperationHelper implements WriteOperations {
private static final Logger LOG =
LoggerFactory.getLogger(WriteOperationHelper.class);
/**
* Owning filesystem.
*/
private final S3AFileSystem owner;
/**
* Invoker for operations; uses the S3A retry policy and calls int
* {@link #operationRetried(String, Exception, int, boolean)} on retries.
*/
private final Invoker invoker;
/** Configuration of the owner. This is a reference, not a copy. */
private final Configuration conf;
/** Bucket of the owner FS. */
private final String bucket;
/**
* statistics context.
*/
private final S3AStatisticsContext statisticsContext;
/**
* Store Context; extracted from owner.
*/
private final StoreContext storeContext;
/**
* Source of Audit spans.
*/
private final AuditSpanSource auditSpanSource;
/**
* Audit Span.
*/
private AuditSpan auditSpan;
/**
* Factory for AWS requests.
*/
private final RequestFactory requestFactory;
/**
* Constructor.
* @param owner owner FS creating the helper
* @param conf Configuration object
* @param statisticsContext statistics context
* @param auditSpanSource source of spans
* @param auditSpan span to activate
*
*/
protected WriteOperationHelper(S3AFileSystem owner,
Configuration conf,
S3AStatisticsContext statisticsContext,
final AuditSpanSource auditSpanSource,
final AuditSpan auditSpan) {
this.owner = owner;
this.invoker = new Invoker(new S3ARetryPolicy(conf),
this::operationRetried);
this.conf = conf;
this.statisticsContext = statisticsContext;
this.storeContext = owner.createStoreContext();
this.bucket = owner.getBucket();
this.auditSpanSource = auditSpanSource;
this.auditSpan = checkNotNull(auditSpan);
this.requestFactory = owner.getRequestFactory();
}
/**
* Callback from {@link Invoker} when an operation is retried.
* @param text text of the operation
* @param ex exception
* @param retries number of retries
* @param idempotent is the method idempotent
*/
void operationRetried(String text, Exception ex, int retries,
boolean idempotent) {
LOG.info("{}: Retried {}: {}", text, retries, ex.toString());
LOG.debug("Stack", ex);
owner.operationRetried(text, ex, retries, idempotent);
}
/**
* Execute a function with retry processing.
* Also activates the current span.
* @param <T> type of return value
* @param action action to execute (used in error messages)
* @param path path of work (used in error messages)
* @param idempotent does the operation have semantics
* which mean that it can be retried even if was already executed?
* @param operation operation to execute
* @return the result of the call
* @throws IOException any IOE raised, or translated exception
*/
public <T> T retry(String action,
String path,
boolean idempotent,
CallableRaisingIOE<T> operation)
throws IOException {
activateAuditSpan();
return invoker.retry(action, path, idempotent, operation);
}
/**
* Get the audit span this object was created with.
* @return the audit span
*/
public AuditSpan getAuditSpan() {
return auditSpan;
}
/**
* Activate the audit span.
* @return the span
*/
private AuditSpan activateAuditSpan() {
return auditSpan.activate();
}
/**
* Deactivate the audit span.
*/
private void deactivateAuditSpan() {
auditSpan.deactivate();
}
/**
* Create a {@link PutObjectRequest} request against the specific key.
* @param destKey destination key
* @param inputStream source data.
* @param length size, if known. Use -1 for not known
* @param headers optional map of custom headers.
* @return the request
*/
@Retries.OnceRaw
public PutObjectRequest createPutObjectRequest(String destKey,
InputStream inputStream,
long length,
final Map<String, String> headers) {
activateAuditSpan();
ObjectMetadata objectMetadata = newObjectMetadata(length);
if (headers != null) {
objectMetadata.setUserMetadata(headers);
}
return getRequestFactory().newPutObjectRequest(
destKey,
objectMetadata,
inputStream);
}
/**
* Create a {@link PutObjectRequest} request to upload a file.
* @param dest key to PUT to.
* @param sourceFile source file
* @return the request
*/
@Retries.OnceRaw
public PutObjectRequest createPutObjectRequest(String dest,
File sourceFile) {
Preconditions.checkState(sourceFile.length() < Integer.MAX_VALUE,
"File length is too big for a single PUT upload");
activateAuditSpan();
return getRequestFactory().
newPutObjectRequest(dest,
newObjectMetadata((int) sourceFile.length()),
sourceFile);
}
/**
* Callback on a successful write.
* @param length length of the write
*/
public void writeSuccessful(long length) {
}
/**
* Callback on a write failure.
* @param ex Any exception raised which triggered the failure.
*/
public void writeFailed(Exception ex) {
LOG.debug("Write to {} failed", this, ex);
}
/**
* Create a new object metadata instance.
* Any standard metadata headers are added here, for example:
* encryption.
* @param length size, if known. Use -1 for not known
* @return a new metadata instance
*/
public ObjectMetadata newObjectMetadata(long length) {
return getRequestFactory().newObjectMetadata(length);
}
/**
* Start the multipart upload process.
* Retry policy: retrying, translated.
* @param destKey destination of upload
* @return the upload result containing the ID
* @throws IOException IO problem
*/
@Retries.RetryTranslated
public String initiateMultiPartUpload(String destKey) throws IOException {
LOG.debug("Initiating Multipart upload to {}", destKey);
try (AuditSpan span = activateAuditSpan()) {
return retry("initiate MultiPartUpload", destKey, true,
() -> {
final InitiateMultipartUploadRequest initiateMPURequest =
getRequestFactory().newMultipartUploadRequest(
destKey);
return owner.initiateMultipartUpload(initiateMPURequest)
.getUploadId();
});
}
}
/**
* Finalize a multipart PUT operation.
* This completes the upload, and, if that works, calls
* {@link S3AFileSystem#finishedWrite(String, long, String, String)}
* to update the filesystem.
* Retry policy: retrying, translated.
* @param destKey destination of the commit
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @param length length of the upload
* @param retrying retrying callback
* @return the result of the operation.
* @throws IOException on problems.
*/
@Retries.RetryTranslated
private CompleteMultipartUploadResult finalizeMultipartUpload(
String destKey,
String uploadId,
List<PartETag> partETags,
long length,
Retried retrying) throws IOException {
if (partETags.isEmpty()) {
throw new PathIOException(destKey,
"No upload parts in multipart upload");
}
try (AuditSpan span = activateAuditSpan()) {
CompleteMultipartUploadResult uploadResult;
uploadResult = invoker.retry("Completing multipart upload", destKey,
true,
retrying,
() -> {
final CompleteMultipartUploadRequest request =
getRequestFactory().newCompleteMultipartUploadRequest(
destKey, uploadId, partETags);
return owner.getAmazonS3Client().completeMultipartUpload(
request);
});
owner.finishedWrite(destKey, length, uploadResult.getETag(),
uploadResult.getVersionId());
return uploadResult;
}
}
/**
* This completes a multipart upload to the destination key via
* {@code finalizeMultipartUpload()}.
* Retry policy: retrying, translated.
* Retries increment the {@code errorCount} counter.
* @param destKey destination
* @param uploadId multipart operation Id
* @param partETags list of partial uploads
* @param length length of the upload
* @param errorCount a counter incremented by 1 on every error; for
* use in statistics
* @return the result of the operation.
* @throws IOException if problems arose which could not be retried, or
* the retry count was exceeded
*/
@Retries.RetryTranslated
public CompleteMultipartUploadResult completeMPUwithRetries(
String destKey,
String uploadId,
List<PartETag> partETags,
long length,
AtomicInteger errorCount)
throws IOException {
checkNotNull(uploadId);
checkNotNull(partETags);
LOG.debug("Completing multipart upload {} with {} parts",
uploadId, partETags.size());
return finalizeMultipartUpload(destKey,
uploadId,
partETags,
length,
(text, e, r, i) -> errorCount.incrementAndGet()
);
}
/**
 * Abort a multipart upload operation.
 * @param destKey destination key of the upload
 * @param uploadId multipart operation Id
 * @param shouldRetry should failures trigger a retry?
 * @param retrying callback invoked on every retry
 * @throws IOException failure to abort
 * @throws FileNotFoundException if the abort ID is unknown
 */
@Retries.RetryTranslated
public void abortMultipartUpload(String destKey, String uploadId,
    boolean shouldRetry, Retried retrying)
    throws IOException {
  final String action = "Aborting multipart upload ID " + uploadId;
  if (!shouldRetry) {
    // single pass attempt.
    once(action,
        destKey,
        withinAuditSpan(getAuditSpan(), () ->
            owner.abortMultipartUpload(destKey, uploadId)));
    return;
  }
  // retrying option
  invoker.retry(action,
      destKey,
      true,
      retrying,
      withinAuditSpan(getAuditSpan(), () ->
          owner.abortMultipartUpload(destKey, uploadId)));
}
/**
 * Abort a single multipart commit operation, retrying on failure.
 * @param upload upload to abort.
 * @throws IOException on problems.
 */
@Retries.RetryTranslated
public void abortMultipartUpload(MultipartUpload upload)
    throws IOException {
  final String key = upload.getKey();
  invoker.retry("Aborting multipart commit", key, true,
      withinAuditSpan(getAuditSpan(),
          () -> owner.abortMultipartUpload(upload)));
}
/**
 * Abort multipart uploads under a path: limited to the first
 * few hundred.
 * @param prefix prefix for uploads to abort
 * @return a count of aborts
 * @throws IOException trouble; FileNotFoundExceptions are swallowed.
 */
@Retries.RetryTranslated
public int abortMultipartUploadsUnderPath(String prefix)
    throws IOException {
  LOG.debug("Aborting multipart uploads under {}", prefix);
  final List<MultipartUpload> uploads = listMultipartUploads(prefix);
  LOG.debug("Number of outstanding uploads: {}", uploads.size());
  int aborted = 0;
  for (MultipartUpload upload : uploads) {
    try {
      abortMultipartUpload(upload);
      aborted++;
    } catch (FileNotFoundException e) {
      // the upload vanished between the listing and the abort call
      LOG.debug("Already aborted: {}", upload.getKey(), e);
    }
  }
  return aborted;
}
/**
 * List the multipart uploads in progress under a prefix.
 * Activates the audit span, then delegates to the owning filesystem.
 * @param prefix prefix for the listing
 * @return the uploads found
 * @throws IOException on failure
 */
@Override
@Retries.RetryTranslated
public List<MultipartUpload> listMultipartUploads(final String prefix)
throws IOException {
activateAuditSpan();
return owner.listMultipartUploads(prefix);
}
/**
 * Abort a multipart commit operation.
 * @param destKey destination key of ongoing operation
 * @param uploadId multipart operation Id
 * @throws IOException on problems.
 * @throws FileNotFoundException if the abort ID is unknown
 */
@Override
@Retries.RetryTranslated
public void abortMultipartCommit(String destKey, String uploadId)
throws IOException {
// delegate to the retrying abort, using the invoker's own retry callback
abortMultipartUpload(destKey, uploadId, true, invoker.getRetryCallback());
}
/**
 * Create and initialize a part request of a multipart upload.
 * Exactly one of: {@code uploadStream} or {@code sourceFile}
 * must be specified.
 * A subset of the file may be posted, by providing the starting point
 * in {@code offset} and a length of block in {@code size} equal to
 * or less than the remaining bytes.
 * The part number must be less than 10000.
 * Retry policy is once-translated; retrying here would be too much effort.
 * @param destKey destination key of ongoing operation
 * @param uploadId ID of ongoing upload
 * @param partNumber current part number of the upload
 * @param size amount of data
 * @param uploadStream source of data to upload
 * @param sourceFile optional source file.
 * @param offset offset in file to start reading.
 * @return the request.
 * @throws IllegalArgumentException if the parameters are invalid.
 * @throws PathIOException if the part number is out of range.
 */
@Override
@Retries.OnceTranslated
public UploadPartRequest newUploadPartRequest(
String destKey,
String uploadId,
int partNumber,
int size,
InputStream uploadStream,
File sourceFile,
Long offset) throws IOException {
// validation is performed by the request factory itself
return once("upload part request", destKey,
withinAuditSpan(getAuditSpan(), () ->
getRequestFactory().newUploadPartRequest(
destKey,
uploadId,
partNumber,
size,
uploadStream,
sourceFile,
offset)));
}
/**
 * The toString method is intended to be used in logging/toString calls.
 * @return a string description.
 */
@Override
public String toString() {
  return "WriteOperationHelper {bucket=" + bucket + '}';
}
/**
 * PUT an object directly (i.e. not via the transfer manager).
 * Byte length is calculated from the file length, or, if there is no
 * file, from the content length of the header.
 * @param putObjectRequest the request
 * @return the upload initiated
 * @throws IOException on problems
 */
@Retries.RetryTranslated
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
    throws IOException {
  final String key = putObjectRequest.getKey();
  return retry("Writing Object", key, true,
      withinAuditSpan(getAuditSpan(),
          () -> owner.putObjectDirect(putObjectRequest)));
}
/**
 * PUT an object via the transfer manager.
 * @param putObjectRequest the request
 * @return the result of the operation
 * @throws IOException on problems
 */
@Retries.RetryTranslated
public UploadResult uploadObject(PutObjectRequest putObjectRequest)
throws IOException {
// NOTE(review): this call IS wrapped in retry(), even though the
// transfer manager has its own retry logic; the previous "no retry"
// comment contradicted the code. Confirm whether the double layer
// of retries is intentional.
return retry("Writing Object",
putObjectRequest.getKey(), true,
withinAuditSpan(getAuditSpan(), () ->
owner.executePut(putObjectRequest, null)));
}
/**
 * Revert a commit by deleting the file at the destination key,
 * then restoring a fake parent directory if needed.
 * Relies on retry code in the filesystem.
 * @param destKey destination key
 * @throws IOException on problems
 */
@Retries.OnceTranslated
public void revertCommit(String destKey) throws IOException {
once("revert commit", destKey,
withinAuditSpan(getAuditSpan(), () -> {
Path destPath = owner.keyToQualifiedPath(destKey);
owner.deleteObjectAtPath(destPath,
destKey, true);
// deleting the object may have removed the last entry in the
// parent "directory"; recreate the marker if so
owner.maybeCreateFakeParentDirectory(destPath);
}));
}
/**
 * This completes a multipart upload to the destination key via
 * {@code finalizeMultipartUpload()}.
 * Retry policy: retrying, translated.
 * Unlike {@code completeMPUwithRetries()}, no error counter is
 * updated on retry: the no-op retry callback is used.
 * @param destKey destination
 * @param uploadId multipart operation Id
 * @param partETags list of partial uploads
 * @param length length of the upload
 * @return the result of the operation.
 * @throws IOException if problems arose which could not be retried, or
 * the retry count was exceeded
 */
@Retries.RetryTranslated
public CompleteMultipartUploadResult commitUpload(
String destKey,
String uploadId,
List<PartETag> partETags,
long length)
throws IOException {
checkNotNull(uploadId);
checkNotNull(partETags);
LOG.debug("Completing multipart upload {} with {} parts",
uploadId, partETags.size());
return finalizeMultipartUpload(destKey,
uploadId,
partETags,
length,
Invoker.NO_OP
);
}
/**
 * Upload part of a multi-partition file, retrying on failure.
 * @param request request
 * @return the result of the operation.
 * @throws IOException on problems
 */
@Retries.RetryTranslated
public UploadPartResult uploadPart(UploadPartRequest request)
    throws IOException {
  final String action = "upload part #" + request.getPartNumber()
      + " upload ID " + request.getUploadId();
  return retry(action,
      request.getKey(),
      true,
      withinAuditSpan(getAuditSpan(),
          () -> owner.uploadPart(request)));
}
/**
 * Get the configuration of this instance; essentially that of the
 * owning filesystem.
 * @return the configuration.
 */
public Configuration getConf() {
  return this.conf;
}
/**
 * Create a S3 Select request for the destination path.
 * This does not build the query.
 * The request factory is invoked inside the audit span, which is
 * closed by try-with-resources once the request has been built.
 * @param path pre-qualified path for query
 * @return the request
 */
public SelectObjectContentRequest newSelectRequest(Path path) {
try (AuditSpan span = getAuditSpan()) {
return getRequestFactory().newSelectRequest(
storeContext.pathToKey(path));
}
}
/**
 * Execute an S3 Select operation.
 * On a failure, the request is only logged at debug to avoid the
 * select exception being printed.
 * @param source source for selection
 * @param request Select request to issue.
 * @param action the action for use in exception creation
 * @return response
 * @throws IOException failure
 */
@Retries.RetryTranslated
public SelectObjectContentResult select(
final Path source,
final SelectObjectContentRequest request,
final String action)
throws IOException {
// no setting of span here as the select binding is (statically) created
// without any span.
// sanity check: the request must target this helper's bucket
String bucketName = request.getBucketName();
Preconditions.checkArgument(bucket.equals(bucketName),
"wrong bucket: %s", bucketName);
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating select call {} {}",
source, request.getExpression());
LOG.debug(SelectBinding.toString(request));
}
return invoker.retry(
action,
source.toString(),
true,
withinAuditSpan(getAuditSpan(), () -> {
try (DurationInfo ignored =
new DurationInfo(LOG, "S3 Select operation")) {
try {
return owner.getAmazonS3Client().selectObjectContent(request);
} catch (AmazonS3Exception e) {
// the failure is logged tersely at error; the full request
// (and stack) only at debug, then rethrown for translation
LOG.error("Failure of S3 Select request against {}",
source);
LOG.debug("S3 Select request against {}:\n{}",
source,
SelectBinding.toString(request),
e);
throw e;
}
}
}));
}
/**
 * Create a new audit span for an operation by delegating to the
 * span source.
 * @param operation operation name
 * @param path1 first path, may be null
 * @param path2 second path, may be null
 * @return the new span
 * @throws IOException failure to create the span
 */
@Override
public AuditSpan createSpan(final String operation,
@Nullable final String path1,
@Nullable final String path2) throws IOException {
return auditSpanSource.createSpan(operation, path1, path2);
}
/**
 * Increment the owning filesystem's write-operation counter.
 */
@Override
public void incrementWriteOperations() {
owner.incrementWriteOperations();
}
/**
 * Deactivate the audit span.
 * @throws IOException on failure.
 */
@Override
public void close() throws IOException {
deactivateAuditSpan();
}
/**
 * Get the request factory which uses this store's audit span.
 * @return the request factory.
 */
public RequestFactory getRequestFactory() {
  return this.requestFactory;
}
}
" |
"" | "/*
* Copyright 2016 DiffPlug
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.diffplug.gradle.spotless;
import java.io.File;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.api.file.FileCollection;
import org.gradle.api.internal.file.UnionFileCollection;
import com.diffplug.spotless.FormatExceptionPolicyStrict;
import com.diffplug.spotless.FormatterFunc;
import com.diffplug.spotless.FormatterStep;
import com.diffplug.spotless.LazyForwardingEquality;
import com.diffplug.spotless.LineEnding;
import com.diffplug.spotless.ThrowingEx;
import com.diffplug.spotless.generic.EndWithNewlineStep;
import com.diffplug.spotless.generic.IndentStep;
import com.diffplug.spotless.generic.LicenseHeaderStep;
import com.diffplug.spotless.generic.ReplaceRegexStep;
import com.diffplug.spotless.generic.ReplaceStep;
import com.diffplug.spotless.generic.TrimTrailingWhitespaceStep;
import groovy.lang.Closure;
/** Adds a `spotless{Name}Check` and `spotless{Name}Apply` task. */
public class FormatExtension {
/** The parent extension which owns (and names) this format. */
final SpotlessExtension root;
public FormatExtension(SpotlessExtension root) {
this.root = root;
}
// Looks up this format's name by identity within the root extension's map.
private String formatName() {
for (Map.Entry<String, FormatExtension> entry : root.formats.entrySet()) {
if (entry.getValue() == this) {
return entry.getKey();
}
}
throw new IllegalStateException("This format is not contained by any SpotlessExtension.");
}
/** Whether paddedCell mode is enabled; off by default. */
boolean paddedCell = false;
/** Enables paddedCell mode. @see <a href="https://github.com/diffplug/spotless/blob/master/PADDEDCELL.md">Padded cell</a> */
public void paddedCell() {
paddedCell(true);
}
/** Enables paddedCell mode. @see <a href="https://github.com/diffplug/spotless/blob/master/PADDEDCELL.md">Padded cell</a> */
public void paddedCell(boolean paddedCell) {
this.paddedCell = paddedCell;
}
/** Line-ending override; null means inherit from the root extension. */
LineEnding lineEndings;
/** Returns the line endings to use (defaults to {@link SpotlessExtension#getLineEndings()}). */
public LineEnding getLineEndings() {
return lineEndings == null ? root.getLineEndings() : lineEndings;
}
/** Sets the line endings to use (defaults to {@link SpotlessExtension#getLineEndings()}). */
public void setLineEndings(LineEnding lineEndings) {
this.lineEndings = lineEndings;
}
/** Encoding override; null means inherit from the root extension. */
Charset encoding;
/** Returns the encoding to use (defaults to {@link SpotlessExtension#getEncoding()}). */
public Charset getEncoding() {
return encoding == null ? root.getEncoding() : encoding;
}
/** Sets the encoding to use (defaults to {@link SpotlessExtension#getEncoding()}). */
public void setEncoding(String name) {
setEncoding(Charset.forName(name));
}
/** Sets the encoding to use (defaults to {@link SpotlessExtension#getEncoding()}). */
public void setEncoding(Charset charset) {
encoding = Objects.requireNonNull(charset);
}
/** Per-step and per-path policy for errors thrown by formatter steps. */
FormatExceptionPolicyStrict exceptionPolicy = new FormatExceptionPolicyStrict();
/** Ignores errors in the given step. */
public void ignoreErrorForStep(String stepName) {
exceptionPolicy.excludeStep(stepName);
}
/** Ignores errors for the given relative path. */
public void ignoreErrorForPath(String relativePath) {
exceptionPolicy.excludePath(relativePath);
}
/** Sets encoding to use (defaults to {@link SpotlessExtension#getEncoding()}). */
public void encoding(String charset) {
setEncoding(charset);
}
/** The files that need to be formatted. */
protected FileCollection target;
/**
* FileCollections pass through raw.
* Strings are treated as the 'include' arg to fileTree, with project.rootDir as the dir.
* {@code List<String>} are treated as the 'includes' arg to fileTree, with project.rootDir as the dir.
* Anything else gets passed to getProject().files().
*/
public void target(Object... targets) {
if (targets.length == 0) {
this.target = getProject().files();
} else if (targets.length == 1) {
this.target = parseTarget(targets[0]);
} else {
// an all-String varargs is treated as a single list of fileTree includes
if (Stream.of(targets).allMatch(o -> o instanceof String)) {
this.target = parseTarget(Arrays.asList(targets));
} else {
UnionFileCollection union = new UnionFileCollection();
for (Object target : targets) {
union.add(parseTarget(target));
}
this.target = union;
}
}
}
@SuppressWarnings("unchecked")
protected FileCollection parseTarget(Object target) {
if (target instanceof FileCollection) {
return (FileCollection) target;
} else if (target instanceof String ||
(target instanceof List && ((List<?>) target).stream().allMatch(o -> o instanceof String))) {
// since people are likely to do '**/*.md', we want to make sure to exclude folders
// they don't want to format which will slow down the operation greatly
File dir = getProject().getProjectDir();
List<String> excludes = new ArrayList<>();
// no git
excludes.add(".git");
// no .gradle
if (getProject() == getProject().getRootProject()) {
excludes.add(".gradle");
}
// no build folders
excludes.add(relativize(dir, getProject().getBuildDir()));
for (Project subproject : getProject().getSubprojects()) {
excludes.add(relativize(dir, subproject.getBuildDir()));
}
if (target instanceof String) {
return (FileCollection) getProject().fileTree(dir).include((String) target).exclude(excludes);
} else {
// target can only be a List<String> at this point
return (FileCollection) getProject().fileTree(dir).include((List<String>) target).exclude(excludes);
}
} else {
return getProject().files(target);
}
}
// Returns dest's absolute path relative to root's; the result keeps the
// leading separator (or is empty when dest equals root).
static String relativize(File root, File dest) {
String rootPath = root.getAbsolutePath();
String destPath = dest.getAbsolutePath();
if (!destPath.startsWith(rootPath)) {
throw new IllegalArgumentException(dest + " is not a child of " + root);
} else {
return destPath.substring(rootPath.length());
}
}
/** The steps that need to be added. */
protected List<FormatterStep> steps = new ArrayList<>();
/** Adds a new step. */
public void addStep(FormatterStep newStep) {
FormatterStep existing = getExistingStep(newStep.getName());
if (existing != null) {
throw new GradleException("Multiple steps with name '" + newStep.getName() + "' for spotless format '" + formatName() + "'");
}
steps.add(newStep);
}
/** Returns the existing step with the given name, if any. */
@Nullable
protected FormatterStep getExistingStep(String stepName) {
for (FormatterStep step : steps) {
if (stepName.equals(step.getName())) {
return step;
}
}
return null;
}
/** Replaces the given step. */
protected void replaceStep(FormatterStep replacementStep) {
FormatterStep existing = getExistingStep(replacementStep.getName());
if (existing == null) {
throw new GradleException("Cannot replace step '" + replacementStep.getName() + "' for spotless format '" + formatName() + "' because it hasn't been added yet.");
}
int index = steps.indexOf(existing);
steps.set(index, replacementStep);
}
/** Clears all of the existing steps. */
public void clearSteps() {
steps.clear();
}
/**
* An optional performance optimization if you are using any of the `custom` or `customLazy`
* methods. If you aren't explicitly calling `custom` or `customLazy`, then this method
* has no effect.
*
* Spotless tracks what files have changed from run to run, so that it can run faster
* by only checking files which have changed, or whose formatting steps have changed.
* If you use either the `custom` or `customLazy` methods, then gradle can never mark
* your files as `up-to-date`, because it can't know if perhaps the behavior of your
* custom function has changed.
*
* If you set `bumpThisNumberIfACustomStepChanges( <some number> )`, then spotless will
* assume that the custom rules have not changed if the number has not changed. If a
* custom rule does change, then you must bump the number so that spotless will know
* that it must recheck the files it has already checked.
*/
public void bumpThisNumberIfACustomStepChanges(int number) {
globalState = number;
}
/** Up-to-date state shared by custom steps; defaults to never-up-to-date. */
private Serializable globalState = new NeverUpToDateBetweenRuns();
/** Lazily evaluates to a random value, so custom steps never count as up-to-date across runs. */
static class NeverUpToDateBetweenRuns extends LazyForwardingEquality<Integer> {
private static final long serialVersionUID = 1L;
private static final Random RANDOM = new Random();
@Override
protected Integer calculateState() throws Exception {
return RANDOM.nextInt();
}
}
/**
* Adds the given custom step, which is constructed lazily for performance reasons.
*
* The resulting function will receive a string with unix-newlines, and it must return a string with unix newlines.
*
* If you're getting errors about `closure cannot be cast to com.diffplug.common.base.Throwing$Function`, then use
* {@link #customLazyGroovy(String, ThrowingEx.Supplier)}.
*/
public void customLazy(String name, ThrowingEx.Supplier<FormatterFunc> formatterSupplier) {
addStep(FormatterStep.createLazy(name, () -> globalState, unusedState -> formatterSupplier.get()));
}
/** Same as {@link #customLazy(String, ThrowingEx.Supplier)}, but for Groovy closures. */
public void customLazyGroovy(String name, ThrowingEx.Supplier<Closure<String>> formatterSupplier) {
customLazy(name, () -> formatterSupplier.get()::call);
}
/** Adds a custom step. Receives a string with unix-newlines, must return a string with unix newlines. */
public void custom(String name, Closure<String> formatter) {
custom(name, formatter::call);
}
/** Adds a custom step. Receives a string with unix-newlines, must return a string with unix newlines. */
public void custom(String name, FormatterFunc formatter) {
customLazy(name, () -> formatter);
}
/** Highly efficient find-replace char sequence. */
public void replace(String name, CharSequence original, CharSequence after) {
addStep(ReplaceStep.create(name, original, after));
}
/** Highly efficient find-replace regex. */
public void replaceRegex(String name, String regex, String replacement) {
addStep(ReplaceRegexStep.create(name, regex, replacement));
}
/** Removes trailing whitespace. */
public void trimTrailingWhitespace() {
addStep(TrimTrailingWhitespaceStep.create());
}
/** Ensures that files end with a single newline. */
public void endWithNewline() {
addStep(EndWithNewlineStep.create());
}
/** Ensures that the files are indented using spaces. */
public void indentWithSpaces(int numSpacesPerTab) {
addStep(IndentStep.Type.SPACE.create(numSpacesPerTab));
}
/** Ensures that the files are indented using spaces (4 per tab). */
public void indentWithSpaces() {
indentWithSpaces(4);
}
/** Ensures that the files are indented using tabs. */
public void indentWithTabs(int tabToSpaces) {
addStep(IndentStep.Type.TAB.create(tabToSpaces));
}
/** Ensures that the files are indented using tabs (tab = 4 spaces). */
public void indentWithTabs() {
indentWithTabs(4);
}
/**
* @param licenseHeader
* Content that should be at the top of every file
* @param delimiter
* Spotless will look for a line that starts with this to know what the "top" is.
*/
public void licenseHeader(String licenseHeader, String delimiter) {
addStep(LicenseHeaderStep.createFromHeader(licenseHeader, delimiter));
}
/**
* @param licenseHeaderFile
* File whose content should be at the top of every file
* @param delimiter
* Spotless will look for a line that starts with this to know what the "top" is.
*/
public void licenseHeaderFile(Object licenseHeaderFile, String delimiter) {
addStep(LicenseHeaderStep.createFromFile(getProject().file(licenseHeaderFile), getEncoding(), delimiter));
}
/** Sets up a format task according to the values in this extension. */
protected void setupTask(SpotlessTask task) {
task.setPaddedCell(paddedCell);
task.setEncoding(getEncoding().name());
task.setExceptionPolicy(exceptionPolicy);
task.setTarget(target);
task.setSteps(steps);
task.setLineEndingsPolicy(getLineEndings().createPolicy(getProject().getProjectDir(), () -> task.target));
}
/** Returns the project that this extension is attached to. */
protected Project getProject() {
return root.project;
}
}
" |
"" | "/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.athena.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* A workgroup, which contains a name, description, creation time, state, and other configuration, listed under
* <a>WorkGroup$Configuration</a>. Each workgroup enables you to isolate queries for you or your group of users from
* other queries in the same account, to configure the query results location and the encryption configuration (known as
* workgroup settings), to enable sending query metrics to Amazon CloudWatch, and to establish per-query data usage
* control limits for all queries in a workgroup. The workgroup settings override is specified in
* EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See
* <a>WorkGroupConfiguration$EnforceWorkGroupConfiguration</a>.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/WorkGroup" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class WorkGroup implements Serializable, Cloneable, StructuredPojo {
/**
 * <p>
 * The workgroup name.
 * </p>
 */
private String name;
/**
 * <p>
 * The state of the workgroup: ENABLED or DISABLED.
 * </p>
 * <p>
 * Stored as the string form of a {@code WorkGroupState} value; see {@code withState(WorkGroupState)}.
 * </p>
 */
private String state;
/**
 * <p>
 * The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the
 * encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for
 * the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount
 * of data scanned per query or per workgroup. The workgroup settings override is specified in
 * EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See
 * <a>WorkGroupConfiguration$EnforceWorkGroupConfiguration</a>.
 * </p>
 */
private WorkGroupConfiguration configuration;
/**
 * <p>
 * The workgroup description.
 * </p>
 */
private String description;
/**
 * <p>
 * The date and time the workgroup was created.
 * </p>
 */
private java.util.Date creationTime;
/**
 * Sets the workgroup name.
 *
 * @param name
 *        The workgroup name.
 */
public void setName(String name) {
    this.name = name;
}
/**
 * Returns the workgroup name.
 *
 * @return The workgroup name.
 */
public String getName() {
    return name;
}
/**
 * Sets the workgroup name.
 *
 * @param name
 *        The workgroup name.
 * @return this object, so that method calls can be chained together.
 */
public WorkGroup withName(final String name) {
    setName(name);
    return this;
}
/**
 * Sets the state of the workgroup: ENABLED or DISABLED.
 *
 * @param state
 *        The state of the workgroup.
 * @see WorkGroupState
 */
public void setState(String state) {
    this.state = state;
}
/**
 * Returns the state of the workgroup: ENABLED or DISABLED.
 *
 * @return The state of the workgroup.
 * @see WorkGroupState
 */
public String getState() {
    return state;
}
/**
 * Sets the state of the workgroup: ENABLED or DISABLED.
 *
 * @param state
 *        The state of the workgroup.
 * @return this object, so that method calls can be chained together.
 * @see WorkGroupState
 */
public WorkGroup withState(final String state) {
    setState(state);
    return this;
}
/**
 * Sets the state of the workgroup from a {@link WorkGroupState} value.
 *
 * @param state
 *        The state of the workgroup: ENABLED or DISABLED.
 * @return this object, so that method calls can be chained together.
 * @see WorkGroupState
 */
public WorkGroup withState(final WorkGroupState state) {
    // store the enum's string form directly
    this.state = state.toString();
    return this;
}
/**
 * Sets the configuration of the workgroup: the S3 query-result location, the
 * encryption configuration (if any), whether Amazon CloudWatch Metrics are
 * enabled, whether workgroup settings override client-side settings, and the
 * per-query / per-workgroup data usage limits. The override flag is
 * EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration.
 * See <a>WorkGroupConfiguration$EnforceWorkGroupConfiguration</a>.
 *
 * @param configuration
 *        The configuration of the workgroup.
 */
public void setConfiguration(WorkGroupConfiguration configuration) {
    this.configuration = configuration;
}
/**
 * Returns the configuration of the workgroup: the S3 query-result location,
 * the encryption configuration (if any), whether Amazon CloudWatch Metrics
 * are enabled, whether workgroup settings override client-side settings, and
 * the per-query / per-workgroup data usage limits. The override flag is
 * EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration.
 * See <a>WorkGroupConfiguration$EnforceWorkGroupConfiguration</a>.
 *
 * @return The configuration of the workgroup.
 */
public WorkGroupConfiguration getConfiguration() {
    return configuration;
}
/**
 * Sets the configuration of the workgroup: the S3 query-result location, the
 * encryption configuration (if any), whether Amazon CloudWatch Metrics are
 * enabled, whether workgroup settings override client-side settings, and the
 * per-query / per-workgroup data usage limits. The override flag is
 * EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration.
 * See <a>WorkGroupConfiguration$EnforceWorkGroupConfiguration</a>.
 *
 * @param configuration
 *        The configuration of the workgroup.
 * @return this object, so that method calls can be chained together.
 */
public WorkGroup withConfiguration(final WorkGroupConfiguration configuration) {
    setConfiguration(configuration);
    return this;
}
/**
 * Sets the workgroup description.
 *
 * @param description
 *        The workgroup description.
 */
public void setDescription(String description) {
    this.description = description;
}
/**
 * Returns the workgroup description.
 *
 * @return The workgroup description.
 */
public String getDescription() {
    return description;
}
/**
 * Sets the workgroup description.
 *
 * @param description
 *        The workgroup description.
 * @return this object, so that method calls can be chained together.
 */
public WorkGroup withDescription(final String description) {
    setDescription(description);
    return this;
}
/**
 * Sets the date and time the workgroup was created.
 *
 * @param creationTime
 *        The date and time the workgroup was created.
 */
public void setCreationTime(java.util.Date creationTime) {
    this.creationTime = creationTime;
}
/**
 * Returns the date and time the workgroup was created.
 *
 * @return The date and time the workgroup was created.
 */
public java.util.Date getCreationTime() {
    return creationTime;
}
/**
 * Sets the date and time the workgroup was created.
 *
 * @param creationTime
 *        The date and time the workgroup was created.
 * @return this object, so that method calls can be chained together.
 */
public WorkGroup withCreationTime(final java.util.Date creationTime) {
    setCreationTime(creationTime);
    return this;
}
/**
 * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
 * redacted from this string using a placeholder value.
 *
 * @return A string representation of this object.
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
// generated format: each non-null member is appended; every member
// except CreationTime carries a trailing comma
if (getName() != null)
sb.append("Name: ").append(getName()).append(",");
if (getState() != null)
sb.append("State: ").append(getState()).append(",");
if (getConfiguration() != null)
sb.append("Configuration: ").append(getConfiguration()).append(",");
if (getDescription() != null)
sb.append("Description: ").append(getDescription()).append(",");
if (getCreationTime() != null)
sb.append("CreationTime: ").append(getCreationTime());
sb.append("}");
return sb.toString();
}
/**
 * Field-by-field, null-safe equality check against another {@code WorkGroup}.
 *
 * @see java.lang.Object#equals(java.lang.Object)
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // also rejects null
    if (!(obj instanceof WorkGroup)) {
        return false;
    }
    WorkGroup other = (WorkGroup) obj;
    // Objects.equals(a, b) reproduces the generated null-xor + equals checks
    return java.util.Objects.equals(this.getName(), other.getName())
            && java.util.Objects.equals(this.getState(), other.getState())
            && java.util.Objects.equals(this.getConfiguration(), other.getConfiguration())
            && java.util.Objects.equals(this.getDescription(), other.getDescription())
            && java.util.Objects.equals(this.getCreationTime(), other.getCreationTime());
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getName() == null) ? 0 : getName().hashCode());
hashCode = prime * hashCode + ((getState() == null) ? 0 : getState().hashCode());
hashCode = prime * hashCode + ((getConfiguration() == null) ? 0 : getConfiguration().hashCode());
hashCode = prime * hashCode + ((getDescription() == null) ? 0 : getDescription().hashCode());
hashCode = prime * hashCode + ((getCreationTime() == null) ? 0 : getCreationTime().hashCode());
return hashCode;
}
@Override
public WorkGroup clone() {
try {
return (WorkGroup) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.athena.model.transform.WorkGroupMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
" |
"" | "/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.reactivex.flowable;
import static org.junit.Assert.assertEquals;
import java.util.*;
import org.junit.Test;
import org.reactivestreams.Publisher;
import io.reactivex.Flowable;
import io.reactivex.FlowableTransformer;
import io.reactivex.flowables.GroupedFlowable;
import io.reactivex.functions.*;
import io.reactivex.subscribers.TestSubscriber;
/**
* Test super/extends of generics.
*
* See https://github.com/Netflix/RxJava/pull/331
*/
public class FlowableCovarianceTest {
/**
* This won't compile if super/extends isn't done correctly on generics.
*/
@Test
public void testCovarianceOfFrom() {
Flowable.<Movie> just(new HorrorMovie());
Flowable.<Movie> fromIterable(new ArrayList<HorrorMovie>());
// Observable.<HorrorMovie>from(new Movie()); // may not compile
}
@Test
public void testSortedList() {
Comparator<Media> sortFunction = new Comparator<Media>() {
@Override
public int compare(Media t1, Media t2) {
return 1;
}
};
// this one would work without the covariance generics
Flowable<Media> f = Flowable.just(new Movie(), new TVSeason(), new Album());
f.toSortedList(sortFunction);
// this one would NOT work without the covariance generics
Flowable<Movie> f2 = Flowable.just(new Movie(), new ActionMovie(), new HorrorMovie());
f2.toSortedList(sortFunction);
}
@Test
public void testGroupByCompose() {
Flowable<Movie> movies = Flowable.just(new HorrorMovie(), new ActionMovie(), new Movie());
TestSubscriber<String> ts = new TestSubscriber<String>();
movies
.groupBy(new Function<Movie, Object>() {
@Override
public Object apply(Movie v) {
return v.getClass();
}
})
.doOnNext(new Consumer<GroupedFlowable<Object, Movie>>() {
@Override
public void accept(GroupedFlowable<Object, Movie> g) {
System.out.println(g.getKey());
}
})
.flatMap(new Function<GroupedFlowable<Object, Movie>, Publisher<String>>() {
@Override
public Publisher<String> apply(GroupedFlowable<Object, Movie> g) {
return g
.doOnNext(new Consumer<Movie>() {
@Override
public void accept(Movie v) {
System.out.println(v);
}
})
.compose(new FlowableTransformer<Movie, Movie>() {
@Override
public Publisher<Movie> apply(Flowable<Movie> m) {
return m.concatWith(Flowable.just(new ActionMovie()));
}
}
)
.map(new Function<Object, String>() {
@Override
public String apply(Object v) {
return v.toString();
}
});
}
})
.subscribe(ts);
ts.assertTerminated();
ts.assertNoErrors();
// System.out.println(ts.getOnNextEvents());
assertEquals(6, ts.valueCount());
}
@SuppressWarnings("unused")
@Test
public void testCovarianceOfCompose() {
Flowable<HorrorMovie> movie = Flowable.just(new HorrorMovie());
Flowable<Movie> movie2 = movie.compose(new FlowableTransformer<HorrorMovie, Movie>() {
@Override
public Publisher<Movie> apply(Flowable<HorrorMovie> t) {
return Flowable.just(new Movie());
}
});
}
@SuppressWarnings("unused")
@Test
public void testCovarianceOfCompose2() {
Flowable<Movie> movie = Flowable.<Movie> just(new HorrorMovie());
Flowable<HorrorMovie> movie2 = movie.compose(new FlowableTransformer<Movie, HorrorMovie>() {
@Override
public Publisher<HorrorMovie> apply(Flowable<Movie> t) {
return Flowable.just(new HorrorMovie());
}
});
}
@SuppressWarnings("unused")
@Test
public void testCovarianceOfCompose3() {
Flowable<Movie> movie = Flowable.<Movie>just(new HorrorMovie());
Flowable<HorrorMovie> movie2 = movie.compose(new FlowableTransformer<Movie, HorrorMovie>() {
@Override
public Publisher<HorrorMovie> apply(Flowable<Movie> t) {
return Flowable.just(new HorrorMovie()).map(new Function<HorrorMovie, HorrorMovie>() {
@Override
public HorrorMovie apply(HorrorMovie v) {
return v;
}
});
}
}
);
}
@SuppressWarnings("unused")
@Test
public void testCovarianceOfCompose4() {
Flowable<HorrorMovie> movie = Flowable.just(new HorrorMovie());
Flowable<HorrorMovie> movie2 = movie.compose(new FlowableTransformer<HorrorMovie, HorrorMovie>() {
@Override
public Publisher<HorrorMovie> apply(Flowable<HorrorMovie> t1) {
return t1.map(new Function<HorrorMovie, HorrorMovie>() {
@Override
public HorrorMovie apply(HorrorMovie v) {
return v;
}
});
}
});
}
@Test
public void testComposeWithDeltaLogic() {
List<Movie> list1 = Arrays.asList(new Movie(), new HorrorMovie(), new ActionMovie());
List<Movie> list2 = Arrays.asList(new ActionMovie(), new Movie(), new HorrorMovie(), new ActionMovie());
Flowable<List<Movie>> movies = Flowable.just(list1, list2);
movies.compose(deltaTransformer);
}
static Function<List<List<Movie>>, Flowable<Movie>> calculateDelta = new Function<List<List<Movie>>, Flowable<Movie>>() {
@Override
public Flowable<Movie> apply(List<List<Movie>> listOfLists) {
if (listOfLists.size() == 1) {
return Flowable.fromIterable(listOfLists.get(0));
} else {
// diff the two
List<Movie> newList = listOfLists.get(1);
List<Movie> oldList = new ArrayList<Movie>(listOfLists.get(0));
Set<Movie> delta = new LinkedHashSet<Movie>();
delta.addAll(newList);
// remove all that match in old
delta.removeAll(oldList);
// filter oldList to those that aren't in the newList
oldList.removeAll(newList);
// for all left in the oldList we'll create DROP events
for (@SuppressWarnings("unused") Movie old : oldList) {
delta.add(new Movie());
}
return Flowable.fromIterable(delta);
}
}
};
static FlowableTransformer<List<Movie>, Movie> deltaTransformer = new FlowableTransformer<List<Movie>, Movie>() {
@Override
public Publisher<Movie> apply(Flowable<List<Movie>> movieList) {
return movieList
.startWith(new ArrayList<Movie>())
.buffer(2, 1)
.skip(1)
.flatMap(calculateDelta);
}
};
/*
* Most tests are moved into their applicable classes such as [Operator]Tests.java
*/
static class Media {
}
static class Movie extends Media {
}
static class HorrorMovie extends Movie {
}
static class ActionMovie extends Movie {
}
static class Album extends Media {
}
static class TVSeason extends Media {
}
static class Rating {
}
static class CoolRating extends Rating {
}
static class Result {
}
static class ExtendedResult extends Result {
}
}
" |
"" | "/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.server.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.accumulo.server.util.time.SimpleTimer;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides a way to push work out to tablet servers via zookeeper and wait for that work to be done. Any tablet server can pick up a work item and process it.
*
* Worker processes watch a zookeeper node for tasks to be performed. After getting an exclusive lock on the node, the worker will perform the task.
*/
public class DistributedWorkQueue {
private static final String LOCKS_NODE = "locks";
private static final Logger log = LoggerFactory.getLogger(DistributedWorkQueue.class);
private ThreadPoolExecutor threadPool;
private ZooReaderWriter zoo = ZooReaderWriter.getInstance();
private String path;
private AccumuloConfiguration config;
private long timerInitialDelay, timerPeriod;
private AtomicInteger numTask = new AtomicInteger(0);
private void lookForWork(final Processor processor, List<String> children) {
if (children.size() == 0)
return;
if (numTask.get() >= threadPool.getCorePoolSize())
return;
Random random = new Random();
Collections.shuffle(children, random);
try {
for (final String child : children) {
if (child.equals(LOCKS_NODE))
continue;
final String lockPath = path + "/locks/" + child;
try {
// no need to use zoolock, because a queue (ephemeral sequential) is not needed
// if can not get the lock right now then do not want to wait
zoo.putEphemeralData(lockPath, new byte[0]);
} catch (NodeExistsException nee) {
// someone else has reserved it
continue;
}
final String childPath = path + "/" + child;
// check to see if another node processed it already
if (!zoo.exists(childPath)) {
zoo.recursiveDelete(lockPath, NodeMissingPolicy.SKIP);
continue;
}
// Great... we got the lock, but maybe we're too busy
if (numTask.get() >= threadPool.getCorePoolSize()) {
zoo.recursiveDelete(lockPath, NodeMissingPolicy.SKIP);
break;
}
log.debug("got lock for " + child);
Runnable task = new Runnable() {
@Override
public void run() {
try {
try {
processor.newProcessor().process(child, zoo.getData(childPath, null));
// if the task fails, then its entry in the Q is not deleted... so it will be retried
try {
zoo.recursiveDelete(childPath, NodeMissingPolicy.SKIP);
} catch (Exception e) {
log.error("Error received when trying to delete entry in zookeeper " + childPath, e);
}
} catch (Exception e) {
log.warn("Failed to process work " + child, e);
}
try {
zoo.recursiveDelete(lockPath, NodeMissingPolicy.SKIP);
} catch (Exception e) {
log.error("Error received when trying to delete entry in zookeeper " + childPath, e);
}
} finally {
numTask.decrementAndGet();
}
try {
// its important that this is called after numTask is decremented
lookForWork(processor, zoo.getChildren(path));
} catch (KeeperException e) {
log.error("Failed to look for work", e);
} catch (InterruptedException e) {
log.info("Interrupted looking for work", e);
}
}
};
numTask.incrementAndGet();
threadPool.execute(task);
}
} catch (Throwable t) {
log.error("Unexpected error", t);
}
}
public interface Processor {
Processor newProcessor();
void process(String workID, byte[] data);
}
public DistributedWorkQueue(String path, AccumuloConfiguration config) {
// Preserve the old delay and period
this(path, config, new Random().nextInt(60 * 1000), 60 * 1000);
}
public DistributedWorkQueue(String path, AccumuloConfiguration config, long timerInitialDelay, long timerPeriod) {
this.path = path;
this.config = config;
this.timerInitialDelay = timerInitialDelay;
this.timerPeriod = timerPeriod;
}
public void startProcessing(final Processor processor, ThreadPoolExecutor executorService) throws KeeperException, InterruptedException {
threadPool = executorService;
zoo.mkdirs(path);
zoo.mkdirs(path + "/" + LOCKS_NODE);
List<String> children = zoo.getChildren(path, new Watcher() {
@Override
public void process(WatchedEvent event) {
switch (event.getType()) {
case NodeChildrenChanged:
if (event.getPath().equals(path))
try {
lookForWork(processor, zoo.getChildren(path, this));
} catch (KeeperException e) {
log.error("Failed to look for work", e);
} catch (InterruptedException e) {
log.info("Interrupted looking for work", e);
}
else
log.info("Unexpected path for NodeChildrenChanged event " + event.getPath());
break;
case NodeCreated:
case NodeDataChanged:
case NodeDeleted:
case None:
log.info("Got unexpected zookeeper event: " + event.getType() + " for " + path);
break;
}
}
});
lookForWork(processor, children);
// Add a little jitter to avoid all the tservers slamming zookeeper at once
SimpleTimer.getInstance(config).schedule(new Runnable() {
@Override
public void run() {
log.debug("Looking for work in " + path);
try {
lookForWork(processor, zoo.getChildren(path));
} catch (KeeperException e) {
log.error("Failed to look for work", e);
} catch (InterruptedException e) {
log.info("Interrupted looking for work", e);
}
}
}, timerInitialDelay, timerPeriod);
}
/**
* Adds work to the queue, automatically converting the String to bytes using UTF-8
*/
public void addWork(String workId, String data) throws KeeperException, InterruptedException {
addWork(workId, data.getBytes(UTF_8));
}
public void addWork(String workId, byte[] data) throws KeeperException, InterruptedException {
if (workId.equalsIgnoreCase(LOCKS_NODE))
throw new IllegalArgumentException("locks is reserved work id");
zoo.mkdirs(path);
zoo.putPersistentData(path + "/" + workId, data, NodeExistsPolicy.SKIP);
}
public List<String> getWorkQueued() throws KeeperException, InterruptedException {
ArrayList<String> children = new ArrayList<String>(zoo.getChildren(path));
children.remove(LOCKS_NODE);
return children;
}
public void waitUntilDone(Set<String> workIDs) throws KeeperException, InterruptedException {
final Object condVar = new Object();
Watcher watcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
switch (event.getType()) {
case NodeChildrenChanged:
synchronized (condVar) {
condVar.notify();
}
break;
case NodeCreated:
case NodeDataChanged:
case NodeDeleted:
case None:
log.info("Got unexpected zookeeper event: " + event.getType() + " for " + path);
break;
}
}
};
List<String> children = zoo.getChildren(path, watcher);
while (!Collections.disjoint(children, workIDs)) {
synchronized (condVar) {
condVar.wait(10000);
}
children = zoo.getChildren(path, watcher);
}
}
}
" |
"" | "package com.fincatto.documentofiscal.nfe310.classes.inutilizacao;
import org.junit.Assert;
import org.junit.Test;
import com.fincatto.documentofiscal.DFAmbiente;
import com.fincatto.documentofiscal.DFUnidadeFederativa;
import com.fincatto.documentofiscal.nfe310.FabricaDeObjetosFake;
import com.fincatto.documentofiscal.nfe310.classes.evento.inutilizacao.NFEventoInutilizacaoDados;
public class NFEventoCancelamentoDadosTest {
@Test(expected = IllegalStateException.class)
public void naoDevePermitirModeloInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setModeloDocumentoFiscal("75");
}
@Test
public void devePermitirAmbosModelosDeNFe() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setModeloDocumentoFiscal("55");
dados.setModeloDocumentoFiscal("65");
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirJustificativaInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
try {
dados.setJustificativa("rcAYGVaFoYcW8q");
} catch (final IllegalStateException e) {
dados.setJustificativa("WDou2V29BncPEppZRB7XnD7BAQPYFgewTmEu2kCCRbESq01soGjLJVxhJmcYMxAY3t0nXCXmWJh8suPIikxWuUxaJCAMBKUiMMm04AyySjtjSrNqThH0W14IpNWM5bCkKOqyoV58HFVxfZLfZOYmn7SCUW3QTOoaos09TFbMMIccnW2kfVMrb8T419Mpy60IIjo6hqORvMPZiDKjSrmpWiYLCIGLLBpqjbO9XmSHryazw2XoT2yJMpfE9N53GCRh");
}
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirCNPJInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setCnpj("1234567890123");
}
@Test(expected = NumberFormatException.class)
public void naoDevePermitirAnoDiferenteDeDuasCasas() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
try {
dados.setAno(9);
} catch (final NumberFormatException e) {
dados.setAno(100);
}
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirNumeroNFInicialInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
try {
dados.setNumeroNFInicial("");
} catch (final IllegalStateException e) {
dados.setNumeroNFInicial("1000000000");
}
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirNumeroNFFinalInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
try {
dados.setNumeroNFFinal("");
} catch (final IllegalStateException e) {
dados.setNumeroNFFinal("1000000000");
}
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirSerieInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
try {
dados.setSerie("");
} catch (final IllegalStateException e) {
dados.setSerie("1000");
}
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirServicoInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setNomeServico("inutiliza");
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirIDInvalido() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setIdentificador("IDw6cRIPJzP4sv6gBWQFCNcFSITQK7rOxjmBFcW2Mzf");
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirIdentificadorNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirAmbienteNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirServicoNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirUFNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirAnoNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirCNPJNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirModeloNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirSerieNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirNumeroNotaInicialNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirNumeroNotaFinalNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setJustificativa("u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test(expected = IllegalStateException.class)
public void naoDevePermitirJustificativaNulo() {
final NFEventoInutilizacaoDados dados = new NFEventoInutilizacaoDados();
dados.setAmbiente(DFAmbiente.HOMOLOGACAO);
dados.setAno(15);
dados.setCnpj("12345678901234");
dados.setIdentificador("ID55605654557305333405403926218856863798956");
dados.setModeloDocumentoFiscal("55");
dados.setNomeServico("INUTILIZAR");
dados.setNumeroNFInicial("1");
dados.setNumeroNFFinal("999999999");
dados.setSerie("999");
dados.setUf(DFUnidadeFederativa.SC);
dados.toString();
}
@Test
public void deveGerarXMLDeAcordoComOPadraoEstabelecido() {
final String xmlEsperado = "<infInut Id=\"ID42161234567890123455123123456789987654321\"><tpAmb>2</tpAmb><xServ>INUTILIZAR</xServ><cUF>42</cUF><ano>16</ano><CNPJ>12345678901234</CNPJ><mod>55</mod><serie>123</serie><nNFIni>123456789</nNFIni><nNFFin>987654321</nNFFin><xJust>u2MGhwXFQDFtSuKsLkmgowBZNNhOWBL4JKIqYnIj5iDPTAUqHSwKL1O2olgmZwigRS1P58Zoc1qDxzqmvv3hBE1LYuLHNPbFXuLwM5ZxvH7xfSpnkX5VBGjrkR3cuiXLr1uz3chFb9JrNY5xU3X0eF9Byc2Q9TkPbFyPj7iRwwQVMNt6FGvpUyRMHGmhSDYhFRD2Dst0UaauvA4V0breWHyN4WUSEm9z377jXHNwtVLQQCxB2wcEIZGWVIT4CF5</xJust></infInut>";
Assert.assertEquals(xmlEsperado, FabricaDeObjetosFake.getNFEventoInutilizacaoDados().toString());
}
}
" |
- Downloads last month
- 7