svn-gvsig-desktop / trunk / org.gvsig.desktop / org.gvsig.desktop.compat.cdc / org.gvsig.fmap.dal / org.gvsig.fmap.dal.file / org.gvsig.fmap.dal.file.dbf / src / main / java / org / gvsig / fmap / dal / store / dbf / utils / DbaseFileHeader.java @ 43978
History | View | Annotate | Download (25.5 KB)
1 |
/**
|
---|---|
2 |
* gvSIG. Desktop Geographic Information System.
|
3 |
*
|
4 |
* Copyright (C) 2007-2013 gvSIG Association.
|
5 |
*
|
6 |
* This program is free software; you can redistribute it and/or
|
7 |
* modify it under the terms of the GNU General Public License
|
8 |
* as published by the Free Software Foundation; either version 3
|
9 |
* of the License, or (at your option) any later version.
|
10 |
*
|
11 |
* This program is distributed in the hope that it will be useful,
|
12 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
14 |
* GNU General Public License for more details.
|
15 |
*
|
16 |
* You should have received a copy of the GNU General Public License
|
17 |
* along with this program; if not, write to the Free Software
|
18 |
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
19 |
* MA 02110-1301, USA.
|
20 |
*
|
21 |
* For any additional information, do not hesitate to contact us
|
22 |
* at info AT gvsig.com, or visit our website www.gvsig.com.
|
23 |
*/
|
24 |
package org.gvsig.fmap.dal.store.dbf.utils; |
25 |
|
26 |
import java.io.IOException; |
27 |
import java.io.UnsupportedEncodingException; |
28 |
import java.nio.ByteBuffer; |
29 |
import java.nio.ByteOrder; |
30 |
import java.nio.channels.FileChannel; |
31 |
import java.nio.charset.Charset; |
32 |
import java.util.ArrayList; |
33 |
import java.util.Calendar; |
34 |
import java.util.Date; |
35 |
import java.util.Iterator; |
36 |
import java.util.List; |
37 |
import java.util.Set; |
38 |
import java.util.SortedMap; |
39 |
|
40 |
import org.gvsig.fmap.dal.DataTypes; |
41 |
import org.gvsig.fmap.dal.exception.UnsupportedVersionException; |
42 |
import org.gvsig.fmap.dal.feature.FeatureAttributeDescriptor; |
43 |
import org.gvsig.fmap.dal.feature.FeatureType; |
44 |
import org.gvsig.fmap.dal.feature.exception.AttributeFeatureTypeNotSuportedException; |
45 |
import org.gvsig.tools.ToolsLocator; |
46 |
import org.gvsig.utils.bigfile.BigByteBuffer2; |
47 |
|
48 |
|
49 |
|
50 |
/**
|
51 |
* Class to represent the header of a Dbase III file. Creation date: (5/15/2001
|
52 |
* 5:15:30 PM)
|
53 |
*/
|
54 |
public class DbaseFileHeader {

    // Size in bytes of one field-descriptor entry in the header.
    private final int FILE_DESCRIPTOR_SIZE = 32;

    // File type marker written at byte 0; 0x03 = dBase III without memo file.
    private static final byte MAGIC = 0x03;

    // Smallest valid header: 32-byte prologue plus the 0x0D terminator byte.
    private static final int MINIMUM_HEADER = 33;

    // Type of the file as read from byte 0; must be 03h.
    private int myFileType = 0x03;

    // Date the file was last updated.
    private Date myUpdateDate = new Date();

    // Number of records in the datafile.
    private int myNumRecords = 0;

    // Length of the header structure, in bytes.
    private int myHeaderLength;

    /**
     * Length of the records. Set to 1 as the default value as if there is
     * not any defined column, at least the deleted status initial byte
     * is taken into account.
     */
    private int myRecordLength = 1;

    // Number of fields in the record.
    private int myNumFields;

    // Collection of header records: one descriptor per field/column.
    private DbaseFieldDescriptor[] myFieldDescriptions;

    // Language driver ID (code page byte) used when writing the header.
    private int myLanguageID = 0x00;

    /**
     * Only considered when myLanguageID = 0x00;
     */
    private String charset = null;

    // Charset names supported by this JVM; filled by the constructor and
    // consulted by mappingEncoding().
    private List<String> encodingSupportedByString = null;

    // Language driver ID as actually read from the file, before any override.
    private int origLanguageID = 0x00;

    /**
     * Headers must always be encoded using ASCII/ISO-8859-1, regardless the
     * encoding of the records
     */
    private static final Charset headerCharset = Charset.forName("ISO-8859-1");
103 |
|
104 |
/**
|
105 |
* DbaseFileHreader constructor comment.
|
106 |
*/
|
107 |
public DbaseFileHeader() {
|
108 |
super();
|
109 |
|
110 |
encodingSupportedByString = new ArrayList<String>(); |
111 |
SortedMap<String, Charset> m = Charset.availableCharsets(); |
112 |
Set<String> k = m.keySet(); |
113 |
Iterator<String> it = k.iterator(); |
114 |
while(it.hasNext()) {
|
115 |
encodingSupportedByString.add(it.next()); |
116 |
} |
117 |
} |
118 |
|
119 |
/**
|
120 |
* Add a column to this DbaseFileHeader. The type is one of (C N L or D)
|
121 |
* character, number, logical(true/false), or date. The Field length is
|
122 |
* the total length in bytes reserved for this column. The decimal count
|
123 |
* only applies to numbers(N), and floating point values (F), and refers
|
124 |
* to the number of characters to reserve after the decimal point.
|
125 |
*
|
126 |
* @param inFieldName DOCUMENT ME!
|
127 |
* @param inFieldType DOCUMENT ME!
|
128 |
* @param inFieldLength DOCUMENT ME!
|
129 |
* @param inDecimalCount DOCUMENT ME!
|
130 |
* @throws org.gvsig.fmap.dal.feature.exception.AttributeFeatureTypeNotSuportedException
|
131 |
|
132 |
*/
|
133 |
public void addColumn(String inFieldName, char inFieldType, |
134 |
int inFieldLength, int inDecimalCount) |
135 |
throws AttributeFeatureTypeNotSuportedException {
|
136 |
if (inFieldLength <= 0) { |
137 |
inFieldLength = 1;
|
138 |
} |
139 |
|
140 |
if (myFieldDescriptions == null) { |
141 |
myFieldDescriptions = new DbaseFieldDescriptor[0]; |
142 |
} |
143 |
|
144 |
int tempLength = 1; // the length is used for the offset, and there is a * for deleted as the first byte |
145 |
DbaseFieldDescriptor[] tempFieldDescriptors = new DbaseFieldDescriptor[myFieldDescriptions.length + |
146 |
1];
|
147 |
|
148 |
for (int i = 0; i < myFieldDescriptions.length; i++) { |
149 |
myFieldDescriptions[i].myFieldDataAddress = tempLength; |
150 |
tempLength = tempLength + myFieldDescriptions[i].myFieldLength; |
151 |
tempFieldDescriptors[i] = myFieldDescriptions[i]; |
152 |
} |
153 |
|
154 |
tempFieldDescriptors[myFieldDescriptions.length] = new DbaseFieldDescriptor();
|
155 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldLength = inFieldLength; |
156 |
tempFieldDescriptors[myFieldDescriptions.length].myDecimalCount = inDecimalCount; |
157 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldDataAddress = tempLength; |
158 |
|
159 |
// set the field name
|
160 |
String tempFieldName = inFieldName;
|
161 |
|
162 |
if (tempFieldName == null) { |
163 |
tempFieldName = "NoName";
|
164 |
} |
165 |
|
166 |
if (tempFieldName.length() > DbaseFile.MAX_FIELD_NAME_LENGTH) {
|
167 |
tempFieldName = tempFieldName.substring(0, DbaseFile.MAX_FIELD_NAME_LENGTH);
|
168 |
warn("FieldName " + inFieldName +
|
169 |
" is longer than "+DbaseFile.MAX_FIELD_NAME_LENGTH+" characters, truncating to " + |
170 |
tempFieldName); |
171 |
} |
172 |
|
173 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldName = tempFieldName; |
174 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldName_trim = tempFieldName |
175 |
.trim(); |
176 |
|
177 |
// the field type
|
178 |
if ((inFieldType == 'C') || (inFieldType == 'c')) { |
179 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'C';
|
180 |
|
181 |
if (inFieldLength > 254) { |
182 |
warn("Field Length for " + inFieldName + " set to " + |
183 |
inFieldLength + |
184 |
" Which is longer than 254, not consistent with dbase III");
|
185 |
} |
186 |
} else if ((inFieldType == 'S') || (inFieldType == 's')) { |
187 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'C';
|
188 |
warn("Field type for " + inFieldName +
|
189 |
" set to S which is flat out wrong people!, I am setting this to C, in the hopes you meant character.");
|
190 |
|
191 |
if (inFieldLength > 254) { |
192 |
warn("Field Length for " + inFieldName + " set to " + |
193 |
inFieldLength + |
194 |
" Which is longer than 254, not consistent with dbase III");
|
195 |
} |
196 |
|
197 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldLength = 8;
|
198 |
} else if ((inFieldType == 'D') || (inFieldType == 'd')) { |
199 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'D';
|
200 |
|
201 |
if (inFieldLength != 8) { |
202 |
warn("Field Length for " + inFieldName + " set to " + |
203 |
inFieldLength + " Setting to 8 digets YYYYMMDD");
|
204 |
} |
205 |
|
206 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldLength = 8;
|
207 |
} else if ((inFieldType == 'F') || (inFieldType == 'f')) { |
208 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'F';
|
209 |
|
210 |
if (inFieldLength > 20) { |
211 |
warn("Field Length for " + inFieldName + " set to " + |
212 |
inFieldLength + |
213 |
" Preserving length, but should be set to Max of 20 not valid for dbase IV, and UP specification, not present in dbaseIII.");
|
214 |
} |
215 |
} else if ((inFieldType == 'N') || (inFieldType == 'n')) { |
216 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'N';
|
217 |
|
218 |
if (inFieldLength > 18) { |
219 |
warn("Field Length for " + inFieldName + " set to " + |
220 |
inFieldLength + |
221 |
" Preserving length, but should be set to Max of 18 for dbase III specification.");
|
222 |
} |
223 |
|
224 |
if (inDecimalCount < 0) { |
225 |
warn("Field Decimal Position for " + inFieldName + " set to " + |
226 |
inDecimalCount + |
227 |
" Setting to 0 no decimal data will be saved.");
|
228 |
tempFieldDescriptors[myFieldDescriptions.length].myDecimalCount = 0;
|
229 |
} |
230 |
//
|
231 |
// if (inDecimalCount > (inFieldLength - 1)) {
|
232 |
// warn("Field Decimal Position for " + inFieldName + " set to " +
|
233 |
// inDecimalCount + " Setting to " + (inFieldLength - 1) +
|
234 |
// " no non decimal data will be saved.");
|
235 |
// tempFieldDescriptors[myFieldDescriptions.length].myDecimalCount = inFieldLength -
|
236 |
// 1;
|
237 |
// }
|
238 |
} else if ((inFieldType == 'L') || (inFieldType == 'l')) { |
239 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldType = 'L';
|
240 |
|
241 |
if (inFieldLength != 1) { |
242 |
warn("Field Length for " + inFieldName + " set to " + |
243 |
inFieldLength + |
244 |
" Setting to length of 1 for logical fields.");
|
245 |
} |
246 |
|
247 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldLength = 1;
|
248 |
} else {
|
249 |
throw new AttributeFeatureTypeNotSuportedException(tempFieldName, |
250 |
inFieldType, ToolsLocator.getDataTypesManager().getTypeName(inFieldType), "DBF");
|
251 |
} |
252 |
|
253 |
// the length of a record
|
254 |
tempLength = tempLength + |
255 |
tempFieldDescriptors[myFieldDescriptions.length].myFieldLength; |
256 |
|
257 |
// set the new fields.
|
258 |
myFieldDescriptions = tempFieldDescriptors; |
259 |
myHeaderLength = 33 + (32 * myFieldDescriptions.length); |
260 |
myNumFields = myFieldDescriptions.length; |
261 |
myRecordLength = tempLength; |
262 |
} |
263 |
|
264 |
    /**
     * Remove a column from this DbaseFileHeader.
     *
     * When the named column is not present the method returns -1 without
     * modifying any state; when it is found, field offsets, header length,
     * field count and record length are all recomputed.
     *
     * @param inFieldName name of the column to remove (matched
     *        case-insensitively against the trimmed field names)
     *
     * @return index of the removed column, -1 if no found
     */
    public int removeColumn(String inFieldName) {
        int retCol = -1;
        // Record data starts after the 1-byte deleted flag.
        int tempLength = 1;
        DbaseFieldDescriptor[] tempFieldDescriptors = new DbaseFieldDescriptor[myFieldDescriptions.length -
            1];

        for (int i = 0, j = 0; i < myFieldDescriptions.length; i++) {
            if (!inFieldName.equalsIgnoreCase(
                        myFieldDescriptions[i].myFieldName.trim())) {
                // if this is the last field and we still haven't found the
                // named field
                if ((i == j) && (i == (myFieldDescriptions.length - 1))) {
                    // Nothing matched: bail out before touching any state.
                    return retCol;
                }

                tempFieldDescriptors[j] = myFieldDescriptions[i];
                // Reassign the data offset as fields shift left.
                tempFieldDescriptors[j].myFieldDataAddress = tempLength;
                tempLength += tempFieldDescriptors[j].myFieldLength;

                // only increment j on non-matching fields
                j++;
            } else {
                retCol = i;
            }
        }

        // set the new fields.
        myFieldDescriptions = tempFieldDescriptors;
        myHeaderLength = 33 + (32 * myFieldDescriptions.length);
        myNumFields = myFieldDescriptions.length;
        myRecordLength = tempLength;

        return retCol;
    }
308 |
|
309 |
/**
|
310 |
* DOCUMENT ME!
|
311 |
*
|
312 |
* @param inWarn DOCUMENT ME!
|
313 |
*/
|
314 |
private void warn(String inWarn) { |
315 |
//TODO Descomentar esto cuando tenga la clase warning support
|
316 |
// warnings.warn(inWarn);
|
317 |
} |
318 |
|
319 |
    /**
     * Return the Field Descriptor for the given field.
     *
     * @param inIndex the index of the requested field description
     * @return the dbase field descriptor.
     */
    public DbaseFieldDescriptor getFieldDescription(int inIndex) {
        return myFieldDescriptions[inIndex];
    }
327 |
|
328 |
    /**
     * Retrieve the length, in bytes, of the field at the given index.
     *
     * @param inIndex field index
     * @return the field length in bytes
     */
    public int getFieldLength(int inIndex) {
        return myFieldDescriptions[inIndex].myFieldLength;
    }
332 |
|
333 |
    /**
     * Retrieve the number of digits after the decimal point for the field at
     * the given index.
     *
     * @param inIndex field index
     * @return the decimal count of the field
     */
    public int getFieldDecimalCount(int inIndex) {
        return myFieldDescriptions[inIndex].myDecimalCount;
    }
337 |
|
338 |
    /**
     * Retrieve the name of the field at the given index (as stored, i.e.
     * possibly padded; see myFieldName_trim for the trimmed form).
     *
     * @param inIndex field index
     * @return the field name
     */
    public String getFieldName(int inIndex) {
        return myFieldDescriptions[inIndex].myFieldName;
    }
342 |
|
343 |
public int getFieldIndex(String name) { |
344 |
for (int i = 0; i < myFieldDescriptions.length; i++) { |
345 |
if (myFieldDescriptions[i].myFieldName_trim
|
346 |
.equalsIgnoreCase(name)) { |
347 |
return i;
|
348 |
} |
349 |
} |
350 |
return -1; |
351 |
} |
352 |
|
353 |
    /**
     * Retrieve the type character of the field at the given index
     * (C, N, L, D, F or M).
     *
     * @param inIndex field index
     * @return the field type character
     */
    public char getFieldType(int inIndex) {
        return myFieldDescriptions[inIndex].myFieldType;
    }
357 |
|
358 |
    /**
     * Return the date this file was last updated.
     *
     * NOTE(review): this returns the internal Date instance directly, so a
     * caller mutating it would change the header's state — confirm before
     * changing to a defensive copy.
     *
     * @return the last-update date read from (or set for) this header
     */
    public Date getLastUpdateDate() {
        return myUpdateDate;
    }
366 |
|
367 |
    /**
     * Return the number of fields in the records.
     *
     * @return the field (column) count
     */
    public int getNumFields() {
        return myNumFields;
    }
375 |
|
376 |
    /**
     * Return the number of records in the file
     *
     * @return the record count as read from (or set on) the header
     */
    public int getNumRecords() {
        return myNumRecords;
    }
384 |
|
385 |
    /**
     * Return the length of the records in bytes.
     *
     * @return the record length, including the leading deleted-flag byte
     */
    public int getRecordLength() {
        return myRecordLength;
    }
393 |
|
394 |
    /**
     * Return the length of the header
     *
     * @return the header length in bytes
     */
    public int getHeaderLength() {
        return myHeaderLength;
    }
402 |
|
403 |
/**
|
404 |
* Read the header data from the DBF file.
|
405 |
*
|
406 |
* @param in
|
407 |
* DOCUMENT ME!
|
408 |
* @param charsName
|
409 |
* @throws UnsupportedVersionException
|
410 |
* @throws UnsupportedEncodingException
|
411 |
|
412 |
*/
|
413 |
public void readHeader(BigByteBuffer2 in, String charsName, boolean allowDuplicatedFieldNames) |
414 |
throws UnsupportedVersionException, UnsupportedEncodingException { |
415 |
// type of file.
|
416 |
myFileType = in.get(); |
417 |
|
418 |
if (myFileType != 0x03) { |
419 |
throw new UnsupportedVersionException("DBF", Integer.toHexString(myFileType)); |
420 |
} |
421 |
|
422 |
// parse the update date information.
|
423 |
int tempUpdateYear = in.get();
|
424 |
int tempUpdateMonth = in.get();
|
425 |
int tempUpdateDay = in.get();
|
426 |
tempUpdateYear = tempUpdateYear + 1900;
|
427 |
|
428 |
Calendar c = Calendar.getInstance(); |
429 |
c.set(Calendar.YEAR, tempUpdateYear);
|
430 |
c.set(Calendar.MONTH, tempUpdateMonth - 1); |
431 |
c.set(Calendar.DATE, tempUpdateDay);
|
432 |
myUpdateDate = c.getTime(); |
433 |
|
434 |
// read the number of records.
|
435 |
in.order(ByteOrder.LITTLE_ENDIAN);
|
436 |
myNumRecords = in.getInt(); |
437 |
|
438 |
// read the length of the header structure.
|
439 |
myHeaderLength = in.getShort(); |
440 |
|
441 |
// read the length of a record
|
442 |
myRecordLength = in.getShort(); //posicon 0h
|
443 |
|
444 |
in.order(ByteOrder.BIG_ENDIAN);
|
445 |
|
446 |
// read the language bit (LDID)
|
447 |
in.position(29);
|
448 |
origLanguageID = byteAsUnsigned(in.get()); |
449 |
if (charsName != null) { |
450 |
// ignore the language bit, use the provided charset name
|
451 |
myLanguageID = DbaseCodepage.getLdid(charsName); |
452 |
this.charset = charsName;
|
453 |
} |
454 |
else {
|
455 |
// use the read the language bit
|
456 |
myLanguageID = origLanguageID; |
457 |
charsName = getCharsetName(); |
458 |
} |
459 |
|
460 |
// Posicionamos para empezar a leer los campos.
|
461 |
in.position(32);
|
462 |
|
463 |
// calculate the number of Fields in the header
|
464 |
myNumFields = (myHeaderLength - FILE_DESCRIPTOR_SIZE - 1) / FILE_DESCRIPTOR_SIZE;
|
465 |
|
466 |
// read all of the header records
|
467 |
myFieldDescriptions = new DbaseFieldDescriptor[myNumFields];
|
468 |
int fieldOffset = 0; |
469 |
|
470 |
List fieldNames = new ArrayList<String>(); |
471 |
|
472 |
// FIXME: should field names be always read using ISO8859-1??
|
473 |
for (int i = 0; i < myNumFields; i++) { |
474 |
myFieldDescriptions[i] = new DbaseFieldDescriptor();
|
475 |
|
476 |
// read the field name
|
477 |
byte[] buffer = new byte[11]; |
478 |
in.get(buffer); |
479 |
String fieldName;
|
480 |
fieldName = new String(buffer, headerCharset); |
481 |
|
482 |
if(allowDuplicatedFieldNames){
|
483 |
fieldName = getUniqueFieldName(fieldName, fieldNames); |
484 |
} |
485 |
fieldNames.add(fieldName); |
486 |
|
487 |
myFieldDescriptions[i].myFieldName = fieldName; |
488 |
|
489 |
myFieldDescriptions[i].myFieldName_trim = myFieldDescriptions[i].myFieldName |
490 |
.trim(); |
491 |
|
492 |
// read the field type
|
493 |
myFieldDescriptions[i].myFieldType = (char) in.get();
|
494 |
|
495 |
// read the field data address, offset from the start of the record.
|
496 |
myFieldDescriptions[i].myFieldDataAddress = in.getInt(); |
497 |
|
498 |
// read the field length in bytes
|
499 |
int tempLength = in.get();
|
500 |
|
501 |
if (tempLength < 0) { |
502 |
tempLength = tempLength + 256;
|
503 |
} |
504 |
|
505 |
myFieldDescriptions[i].myFieldLength = tempLength; |
506 |
|
507 |
// read the field decimal count in bytes
|
508 |
myFieldDescriptions[i].myDecimalCount = in.get(); |
509 |
|
510 |
// NUEVO: Calculamos los offsets aqu? para no
|
511 |
// tener que recalcular cada vez que nos piden
|
512 |
// algo.
|
513 |
myFieldDescriptions[i].myFieldDataAddress = fieldOffset; |
514 |
fieldOffset += tempLength; |
515 |
// Fin NUEVO
|
516 |
// read the reserved bytes.
|
517 |
in.position(in.position() + 14);
|
518 |
|
519 |
} |
520 |
|
521 |
// Last byte is a marker for the end of the field definitions.
|
522 |
in.get(); |
523 |
} |
524 |
|
525 |
    /**
     * Set the number of records in the file
     *
     * @param inNumRecords the record count to store in the header
     */
    public void setNumRecords(int inNumRecords) {
        myNumRecords = inNumRecords;
    }
533 |
|
534 |
/**
|
535 |
* Returns the value of the unsigned byte as a short
|
536 |
* Bytes are always signed in Java, so if we are reading a C unsigned byte
|
537 |
* with value > 128, it will appear as a negative value.
|
538 |
*
|
539 |
* In this case, we need to get the original unsigned value and return it as
|
540 |
* short or int, as byte will never correctly store the value in Java.
|
541 |
*
|
542 |
* @return
|
543 |
*/
|
544 |
private int byteAsUnsigned(byte b) { |
545 |
int i;
|
546 |
if (b<0) { |
547 |
i = b & 0xFF;
|
548 |
} |
549 |
else {
|
550 |
i = b; |
551 |
} |
552 |
return i;
|
553 |
} |
554 |
|
555 |
    /**
     * Class for holding the information associated with one field (column)
     * descriptor of the header.
     */
    public class DbaseFieldDescriptor {
        // Field Name, as stored (may carry NUL/space padding).
        String myFieldName;

        // Field name with the padding trimmed off.
        String myFieldName_trim;

        // Field Type (C N L D F or M)
        char myFieldType;

        // Field Data Address offset from the start of the record.
        int myFieldDataAddress;

        // Length of the data in bytes
        int myFieldLength;

        // Field decimal count in Binary, indicating where the decimal is
        int myDecimalCount;
    }
576 |
|
577 |
    /**
     * Gets the Language driver ID (code page) defined on the file header (or
     * guessed from the provided charset).
     *
     * Some examples:
     * <pre>
     * 01h  DOS USA                   code page 437
     * 02h  DOS Multilingual          code page 850
     * 03h  Windows ANSI              code page 1252
     * 04h  Standard Macintosh
     * 64h  EE MS-DOS                 code page 852
     * 65h  Nordic MS-DOS             code page 865
     * 66h  Russian MS-DOS            code page 866
     * 67h  Icelandic MS-DOS
     * 68h  Kamenicky (Czech) MS-DOS
     * 69h  Mazovia (Polish) MS-DOS
     * 6Ah  Greek MS-DOS (437G)
     * 6Bh  Turkish MS-DOS
     * 96h  Russian Macintosh
     * 97h  Eastern European Macintosh
     * 98h  Greek Macintosh
     * C8h  Windows EE                code page 1250
     * C9h  Russian Windows
     * CAh  Turkish Windows
     * CBh  Greek Windows
     * </pre>
     *
     * See the java equivalences in {@link DbaseCodepage#dbfLdid} &amp;
     * {@link DbaseCodepage#ldidJava} objects.
     *
     * See some others here:
     * https://github.com/infused/dbf/blob/master/docs/supported_encodings.csv
     *
     * @return the language driver ID
     */
    public int getLanguageID() {

        return myLanguageID;
    }
611 |
|
612 |
|
613 |
|
614 |
    /**
     * Builds a DbaseFileHeader describing the given feature type, with no
     * explicit charset (the default of the two-argument overload applies).
     *
     * @param featureType source feature type
     * @return the new header
     * @throws AttributeFeatureTypeNotSuportedException if an attribute type
     *         cannot be mapped to a DBF column type
     */
    public static DbaseFileHeader createDbaseHeader(FeatureType featureType)
        throws AttributeFeatureTypeNotSuportedException {
        return createDbaseHeader(featureType, null);
    }
618 |
|
619 |
    /**
     * Builds a DbaseFileHeader with one column per non-computed attribute of
     * the given feature type.
     *
     * @param featureType source feature type
     * @param charsetName charset name for the records; may be null
     * @return the new header
     * @throws AttributeFeatureTypeNotSuportedException if an attribute type
     *         cannot be mapped to a DBF column type
     */
    public static DbaseFileHeader createDbaseHeader(FeatureType featureType, String charsetName)
        throws AttributeFeatureTypeNotSuportedException {
        DbaseFileHeader header = new DbaseFileHeader();
        Iterator iterator = featureType.iterator();
        header.myLanguageID = DbaseCodepage.getLdid(charsetName);
        header.charset = charsetName;
        while (iterator.hasNext()) {
            FeatureAttributeDescriptor descriptor = (FeatureAttributeDescriptor) iterator.next();

            // Computed attributes have no stored value, so no DBF column.
            if (descriptor.isComputed()) {
                continue;
            }
            int type = descriptor.getType();
            String colName = descriptor.getName();

            // TODO (translated from the original Spanish note): the size
            // taken from the descriptor is not always correct; it should be
            // computed here instead of used as-is.
            int fieldLen = descriptor.getSize();
            int decimales = descriptor.getPrecision();
            // Floating point columns get at least one decimal position.
            if ((type == DataTypes.DOUBLE || type == DataTypes.FLOAT) && decimales == 0) {
                decimales = 1;
            }

            if (DataTypes.DOUBLE == type || DataTypes.FLOAT == type
                || DataTypes.INT == type || DataTypes.LONG == type) {
                header.addColumn(colName, 'N', Math.min(fieldLen, 18),
                    decimales);
            } else if (DataTypes.DATE == type) {
                header.addColumn(colName, 'D', fieldLen, 0);
            } else if (DataTypes.BOOLEAN == type) {
                header.addColumn(colName, 'L', 1, 0);
            } else if (DataTypes.STRING == type) {
                header.addColumn(colName, 'C', Math.min(254, fieldLen), 0);
            } else {
                // Unknown types are stored as strings as a best effort.
                header.addColumn(colName, 'C', Math.min(254, fieldLen < 10 ? 10 : fieldLen), 0);
            }

        }
        return header;
    }
663 |
|
664 |
    /**
     * Write the header data to the DBF file.
     *
     * @param out
     *            A channel to write to. If you have an OutputStream you can
     *            obtain the correct channel by using
     *            java.nio.Channels.newChannel(OutputStream out).
     *
     * @throws IOException
     *             If errors occur.
     */
    public void writeHeader(FileChannel out) throws IOException {
        // take care of the annoying case where no records have been added...
        if (myHeaderLength <= 0) {
            myHeaderLength = MINIMUM_HEADER;
        }

        // Write starting from the beginning of the file.
        out.position(0);

        ByteBuffer buffer = ByteBuffer.allocateDirect(myHeaderLength);
        buffer.order(ByteOrder.LITTLE_ENDIAN);

        // write the output file type.
        buffer.put(MAGIC);

        // write the last-update date as YY MM DD (year counted from 1900).
        Calendar c = Calendar.getInstance();
        c.setTime(new Date());
        buffer.put((byte) (c.get(Calendar.YEAR) % 100));
        buffer.put((byte) (c.get(Calendar.MONTH) + 1));
        buffer.put((byte) (c.get(Calendar.DAY_OF_MONTH)));

        // write the number of records in the datafile.
        buffer.putInt(myNumRecords);

        // write the length of the header structure.
        buffer.putShort((short) myHeaderLength);

        // write the length of a record
        buffer.putShort((short) myRecordLength);

        // skip the reserved bytes in the header
        buffer.position(buffer.position() + 17);

        // write the language id
        buffer.put((byte)getLanguageID());

        // skip the remaining reserved bytes in the header
        buffer.position(buffer.position() + 2);

        // write all of the header records
        int tempOffset = 0;

        if (myFieldDescriptions != null) {
            for (int i = 0; i < myFieldDescriptions.length; i++) {
                // write the field name, NUL-padded to the fixed width
                for (int j = 0; j < DbaseFile.MAX_FIELD_NAME_LENGTH + 1; j++) {
                    if (myFieldDescriptions[i].myFieldName.length() > j) {
                        buffer.put((byte) myFieldDescriptions[i].myFieldName.charAt(j));
                    } else {
                        buffer.put((byte) 0);
                    }
                }

                // write the field type
                buffer.put((byte) myFieldDescriptions[i].myFieldType);

                // write the field data address, offset from the start of the
                // record.
                buffer.putInt(tempOffset);
                tempOffset += myFieldDescriptions[i].myFieldLength;

                // write the length of the field.
                buffer.put((byte) myFieldDescriptions[i].myFieldLength);

                // write the decimal count.
                buffer.put((byte) myFieldDescriptions[i].myDecimalCount);

                // skip the reserved bytes of this descriptor.
                buffer.position(buffer.position() + 14);
            }
        }
        // write the end of the field definitions marker
        buffer.put((byte) 0x0D);

        buffer.position(0);

        int r = buffer.remaining();

        // Keep writing until the whole buffer has been drained.
        while ((r -= out.write(buffer)) > 0) {
            ; // do nothing
        }
    }
759 |
|
760 |
    /**
     * Returns the charset name to use for the records of this file, derived
     * from the header's current language driver ID.
     *
     * @return a java charset name
     */
    public String getCharsetName() {
        return getCharsetName(getLanguageID());
    }
763 |
|
764 |
    /**
     * Returns the java charset name for the given language driver ID.
     *
     * NOTE(review): when ldid != 0 this overwrites the cached {@code charset}
     * field as a side effect before returning it.
     *
     * @param ldid language driver ID; 0 means "not set"
     * @return the mapped charset name, the previously cached charset, or the
     *         default "ISO-8859-1"
     */
    public String getCharsetName(int ldid) {
        if (ldid != 0) {
            charset = DbaseCodepage.getCharsetName(ldid);
        }
        if (charset != null) {
            // use charset otherwise
            return charset;
        }
        // default
        return "ISO-8859-1";
    }
775 |
|
776 |
    /**
     * Returns the charset name derived from the language driver ID actually
     * read from the file, ignoring any override applied afterwards.
     *
     * @return a java charset name
     */
    public String getOriginalCharset() {
        return getCharsetName(this.origLanguageID);
    }
779 |
|
780 |
public String mappingEncoding(String dbfEnconding) { |
781 |
if(encodingSupportedByString.contains(dbfEnconding))
|
782 |
return dbfEnconding;
|
783 |
else
|
784 |
return "UTF-8"; |
785 |
} |
786 |
|
787 |
private String getUniqueFieldName(String fieldName, List fieldNames) { |
788 |
|
789 |
int index = 0; |
790 |
String tempFieldName = fieldName;
|
791 |
while(fieldNames.contains(tempFieldName) && index<1000){ |
792 |
index++; |
793 |
String sufix = String.valueOf(index); |
794 |
tempFieldName = tempFieldName.substring(0, DbaseFile.MAX_FIELD_NAME_LENGTH-sufix.length())+sufix;
|
795 |
} |
796 |
if(index>=1000){ |
797 |
throw new RuntimeException("Can't fix duplicated name for field '"+fieldName+"'."); |
798 |
} |
799 |
return tempFieldName;
|
800 |
} |
801 |
|
802 |
} |