import ij.*; import ij.io.*; import ij.gui.*; import ij.process.*; import ij.plugin.*; import java.util.Vector; import java.io.*; import java.awt.*; import java.awt.event.*; import java.awt.image.*; import java.util.*; import java.lang.*; import java.text.DecimalFormat; /** This plug-in defines an Image object generated by a Fluoview TIFF file. Such files are produced by the Olympus Fluoview software. These TIFF files use the TIFF ImageDescription field to store acquisition parameters, version information and color lookup tables. As a result, this Image Description field must be parsed to extract the corresponding information. In addition, this information must be maintained to ensure that if these images are later saved the appropriate Fluoview information is propagated to the new files. FluoviewTiff_ is derived from the ExtendedTiff to pick up that class's support for reading the ImageDescription field from a Fluoview TIFF file. Note: Fluoview software can not read files associated with this FluoviewTiff_ class. It appears the Fluoview software is using some private tags. Need to follow up on this. @see ij.ImagePlus @see plugins.ExtendedTiff_ @see plugins.ExtendedTiffEncoder_ @see plugins.SaveExtendedTiff_ @author Patrick Kelly */ public class FluoviewTiff_ extends ExtendedTiff_ implements PlugIn { // -------------------------------------------------- // // Begin Inner Class Definitions. // // Inner classes to manage parameters. // // // Olympus Fluoview system may output information in several // channels. 
// NOTE(review): the single physical line below holds what were originally many
// source lines collapsed together; because it begins with a '//' line comment,
// the whole CProps/AcqParams definitions are currently swallowed by that
// comment. Do not hand-edit — restore the original line breaks from version
// control.
//
// CProps — per-channel display properties parsed from the Fluoview TIFF
// ImageDescription tag: an intensity window [min..max] (defaults 0..4095,
// i.e. 12-bit data), a saved previous window giving one level of undo
// (setDefaults() snapshots, reset() restores), and per-channel R/G/B lookup
// tables of defaultSize (256) bytes allocated by allocate(int).
//
// AcqParams — wraps the raw "[Acquisition Parameters]" section text (sap)
// plus the Version/Source/Zoom/Magnification/Description strings parsed out
// of the ImageDescription; toString() pretty-prints all of them.
// public class CProps extends Object { String sTypeDye = ""; String sIntensityRange = ""; public int min = 0; // min intensity public int max = 4095; // max intensity // Support one-level of undo int prevMin = 0; int prevMax = 4095; byte[] bReds; byte[] bGreens; byte[] bBlues; int lutSize; int index = -1; final int defaultSize = 256; // lut size CProps(int i) { index = i; lutSize = defaultSize; allocate(lutSize); } public void setMinAndMax(int min, int max) { this.min = min; this.max = max; } public void setMin(int min) { this.min = min; } public void setMax(int max) { this.max = max; } public int getMin() { return min; } public int getMax() { return max; } private void allocate(int sz) { bReds = new byte[sz]; bGreens = new byte[sz]; bBlues = new byte[sz]; } public String toString() { String s = "Channel "+index+"\n"; s += sIntensityRange; return s; } void setDefaults() { prevMin = min; prevMax = max; } void reset() { min = prevMin; max = prevMax; } } // end CProps inner class // // Maintain information about the acquistion parameters // contained in the ImageDescription field. // public class AcqParams extends Object { String sVersion = ""; String sSource = ""; String sZoom = ""; String sMag = ""; String sDesc = ""; // Keep track of entire acquistion parameter string. This // makes parsing easier. String sap = null; AcqParams(String s) {sap=s;} public String toString() { String s = sap; s+="\n"+"\tVersion: "+sVersion+"\n"; s+="\tSource: "+sSource+"\n"; s+="\tZoom: "+sZoom+"\n"; s+="\tMag: "+sMag+"\n"; s+="\tDesc: "+sDesc+"\n"; return s; } } // end AcqParams inner class // // Inner class to manage parsing Fluoview TIFF ImageDescription // value. This is something of a hack. Should implement a more // formal parsing module using StreamTokenizer or Java equivalent // of lex/yacc. // // This will initialize the ChannelProps and AcqParams // objects maintained by associated instance of FluoviewTiff_. 
// NOTE(review): the lines below are mangled two ways: (1) the original line
// breaks were collapsed, so '//' comments swallow the code that follows them
// on the same physical line; (2) spans between a '<' and the next '>' have
// been eaten (looks like HTML stripping) — e.g. "for(int j=0; j-1)" in
// imageDesc2ChannelProps is missing its loop bound and several statements.
// Do not attempt to hand-repair; recover this region from version control.
//
// FVParser — parses the Fluoview TIFF ImageDescription string:
//   imageDesc2AP(id)  -> AcqParams built from the "[Acquisition Parameters]"
//                        section; also pulls the "[Version Info]" and
//                        "[Description]" entries into the same object.
//   imageDesc2IM(id)  -> Hashtable keyed by channel number of nested IM
//                        records (min/max bounds) parsed from the
//                        "[Intensity Mapping]" section.
//   imageDesc2ChannelProps(id, ht) -> CProps[] combining both (body is
//                        corrupted in this copy).
//   parseImageDescription(id) -> public entry point; fills vChannelProps and
//                        calls initChannelUndo() to arm one-level undo.
// NOTE(review): parse failures are signalled by returning null (or "" from
// readParameter) rather than throwing — every caller must null-check.
// NOTE(review): imageDesc2IM keys its Hashtable with "new Integer(im.index)";
// the Integer constructor is deprecated since Java 9 — prefer
// Integer.valueOf(...) when this file is restored and modernized.
// class FVParser extends Object { final String I_MAPPING_START = "[Intensity Mapping]\r\n"; final String I_MAPPING_END = "[Intensity Mapping End]\r\n"; final String AP_START = "[Acquisition Parameters]\r\n"; final String AP_END = "[Acquisition Parameters End]\r\n"; final String VERSION_START = "[Version Info]\r\n"; final String VERSION_END = "[Version Info End]\r\n"; final String DESC_START = "[Description]\r\n"; final String DESC_END = "[Description End]\r\n"; final String CHANNEL = "Channel"; final String SOURCE = "Source"; final String ZOOM = "Zoom Size"; final String MAG = "Magnification"; final String CHANNEL_TAG = "Ch"; final String MAP_TAG = "Map"; final String SEP = ":"; final String RANGE = "Range="; final String TO = "to"; final String LUT_START = "[LUT Ch"; final String LUT_END = " End]\r\n"; final String LEFT_BRACKET = "["; final String RIGHT_BRACKET = "]"; final String RGB_TAG = "RGB"; final String EQUALS = "="; final String LINE_TERM = "\n"; final String LINE_END = "\r\n"; final String CARRIAGE_RTN = "\r"; final String SPACE = " "; final String TAB = "\t"; // -------------------------------------------------- // Initialize a set of AcqParams by parsing input // ImageDescription String. Returns null on failure. public AcqParams imageDesc2AP(String id) { if(id==null) return null; int is = id.indexOf(AP_START); int ie = id.indexOf(AP_END); if(is==-1 || ie==-1) return null; String sap; try {sap = id.substring(is,ie)+AP_END;} // Add back on ending catch(StringIndexOutOfBoundsException e) {return null; } ap = new AcqParams(sap); // Try to read Source parameter. ap.sSource = readParameter(sap,SOURCE,LINE_TERM); // Try to read Zoom parameter. ap.sZoom = readParameter(sap,ZOOM,LINE_TERM); // Try to read Magnitude parameter. ap.sMag = readParameter(sap,MAG,LINE_TERM); // Following are not really acquistion parameters but we // store them here anyway. Read these from entire string, // not just sap. // Try to read version entry. 
ap.sVersion = readEntry(id,VERSION_START,VERSION_END); // Try to read Description entry. ap.sDesc = readEntry(id,DESC_START,DESC_END); return ap; } // -------------------------------------------------- // // Read an entire entry, backeted by the input start/end // parameters from the input string. // private String readEntry(String id, String start, String end) { int is = id.indexOf(start); int ie = id.indexOf(end); String s = null; try{s=id.substring(is,ie)+end;} catch(StringIndexOutOfBoundsException e) {return null;} return s; } private String readParameter(String id, String param, String term) { String s = ""; int is = id.indexOf(param); int ie = id.indexOf(term,is); try{s = id.substring(is,ie)+term;} catch(StringIndexOutOfBoundsException e) {return "";} return s; } // -------------------------------------------------- // // Nested class to make dealing w/ IntensityParameter entry // easier. // public class IM extends Object { int min,max; int index; String sMappingRange; IM(String s) { sMappingRange = s; } public String toString() { return sMappingRange; } } // -------------------------------------------------- // // Read intensity mapping entry from ImageDescription // field. // Hashtable imageDesc2IM(String id) { if(id==null) return null; // Select the intensity mapping entry. int is = id.indexOf(I_MAPPING_START); int ie = id.indexOf(I_MAPPING_END); if(is==-1 || ie==-1) return null; // Select out interior of intensity map entry. is += I_MAPPING_START.length(); // Intensity mapping entry is present, so parse it. String smap; try{smap = id.substring(is,ie);} catch(StringIndexOutOfBoundsException e) {return null;} StringTokenizer st = new StringTokenizer(smap,LINE_TERM); Hashtable ht = new Hashtable(); while(st.hasMoreTokens()) { String s = st.nextToken(); // Note - add line terminator back into string. IM im = new IM(s+LINE_TERM); // Read min, max intensity bounds. 
String sdelim = " \t\n\r"+SEP+EQUALS; StringTokenizer sti = new StringTokenizer(s,sdelim); // Read past Map field. String tmp = sti.nextToken(); // Read Ch# field and get number. String sindex = sti.nextToken(); try{im.index = Integer.parseInt(sindex.substring(2,sindex.length()));} catch(StringIndexOutOfBoundsException e) {return null;} catch(NumberFormatException e) {return null;} // Read Range info. sti.nextToken(); // Read min value. try{im.min = Integer.parseInt(sti.nextToken());} catch(NumberFormatException e) {return null;} // Read past "to" field. sti.nextToken(); // Read max value. try{im.max = Integer.parseInt(sti.nextToken());} catch(NumberFormatException e) {return null;} // Add this to hash table. ht.put(new Integer(im.index),im); } return ht; } // -------------------------------------------------- private CProps[] imageDesc2ChannelProps(String id, Hashtable htim) { if(id==null || htim==null) return null; CProps[] acp = new CProps[htim.size()]; Enumeration keys = htim.keys(); for(int j=0; j-1) { ie = ap.sap.indexOf(LINE_TERM,is); try{s = ap.sap.substring(is,ie)+LINE_TERM;} catch(StringIndexOutOfBoundsException e) {return null;} } is = ap.sap.indexOf(sChannel,ie); if(is>-1) { ie = ap.sap.indexOf(LINE_TERM,is); try{s += ap.sap.substring(is,ie)+LINE_TERM;} catch(StringIndexOutOfBoundsException e) {return null;} } return s; } // -------------------------------------------------- // // Public interface to parsing operations. // public void parseImageDescription(String id) { vChannelProps = null; if(id==null) return; Hashtable htim = imageDesc2IM(id); if(htim==null) return ; vChannelProps = imageDesc2ChannelProps(id,htim); // Enable future changes to intensity mapping bounds to be // undone. initChannelUndo(); } // -------------------------------------------------- // // Convert channel properties and image acquistion parameters // maintained by associated FluoviewTiff_ object into an // ImageDescription string suitable to output to a TIFF file. 
// NOTE(review): the corruption flagged above continues through this region —
// collapsed line breaks plus eaten '<'-to-'>' spans (e.g. the loop header in
// properties2ImageDesc, "for(int i=0; i end)" in getChannelProps(start,end),
// the missing loop headers and channel-combination code in both
// initRGBPixels overloads, and the insertion-sort loop in
// orderChannelProps). Recover this region from version control; the notes
// below describe only what is still visible.
//
// Outer-class (FluoviewTiff_) members in this region:
//  - properties2ImageDesc(): rebuilds an ImageDescription string from ap and
//    vChannelProps — the inverse of FVParser above. Returns null if either
//    is unset.
//  - Channel bookkeeping: getChannelProps(start,end) (bounds-checked copy),
//    isActive / activateChannel / inactivateChannel (a commented-out guard
//    shows "keep one channel active" was considered and disabled),
//    numActiveChannels, getAcqParams, getNumberChannels.
//  - FIXME(review): initImageDescription() reads
//      if(ap==null || vChannelProps==null) setImageDesc(null); setImageDesc(props2ImageDesc());
//    The second setImageDesc call runs unconditionally — the null guard is
//    ineffective (missing 'else'/'return'). Also note it calls
//    props2ImageDesc() while the method defined above is spelled
//    properties2ImageDesc() — confirm which name actually exists.
//  - setChannelProps(CProps[]) / setChannelProps(CProps): replace channel
//    properties, then reinitialize state and re-arm one-level undo.
//  - initChannelUndo() / reset paths: single level of undo via
//    CProps.setDefaults()/reset().
//  - isFluoviewImage(ImagePlus): static check — instanceof FluoviewTiff_
//    plus hasFluoviewInfo on the FileInfo's info string.
//  - Compositing: both initRGBPixels overloads clamp each color component to
//    255 and pack ARGB ints (alpha fixed at 0xff) into rgbPixels;
//    createCompositeImage wraps rgbPixels in a MemoryImageSource and asks the
//    default Toolkit for an Image. createImage()/getImage()/updateImage()/
//    setProcessor() are overridden to route through the composite path when
//    the 'composite' flag is set (getImage also requires type GRAY16).
//    updateImage()'s composite branch is largely commented out — presumably
//    a disabled cache-invalidation attempt; verify before re-enabling.
//  - getImagePixels variants: map a channel (and slice) onto a stack index as
//    slice = sliceOffset + channel * (stackSize / numberChannels), reading
//    from ip when it is the current slice, else from the stack.
//  - createImagePlus(): copies calibration, scale, channel props, acq params,
//    image description and composite state into a fresh FluoviewTiff_
//    (forced to GRAY16).
//  - NOTE(review): getStatistics(int ch) computes slice = channel2Slice(ch)
//    but then fetches pixels with getStack().getPixels(getCurrentSlice()) —
//    the computed 'slice' is only used for debug output; this looks like it
//    should read the computed slice. Confirm against callers before fixing.
//  - orderChannelProps(): insertion sort of vChannelProps by ascending
//    channel index (chosen because channel counts are typically 1-5).
//  - setChannelPropMin/Max/MinMax: bounds-checked updates that also push the
//    new window into the matching colorMods entry.
//  - setCompositeSlice(c)/getNumberComposites(): stackSize/numberChannels
//    composites; setSlice only for 1 <= c <= numberComposites.
// public String properties2ImageDesc() { if(ap==null || vChannelProps==null) return null; String sim = I_MAPPING_START; // accumulate intensity mapping String slut = ""; // accumulate lookup tables String stype = ""; // accumulate type/dye strings // Write out channel properties associated w/ each channel // in the image. for(int i=0; i end) return null; if(start < 0 || end >= vChannelProps.length) return null; CProps[] acp = new CProps[end-start+1]; for(int n=start, index=0; n<=end;++n,++index) acp[index] = vChannelProps[n]; return acp; } // -------------------------------------------------- public boolean isActive(int i) { return activeChannels[i]; } // -------------------------------------------------- public void activateChannel(int i) { // Make channel active. activeChannels[i] = true; updateActiveCount(); } // -------------------------------------------------- public void inactivateChannel(int i) { // Ensure that at least one channel is always active. //if(nActive == 1) //return; activeChannels[i] = false; updateActiveCount(); } // -------------------------------------------------- public int numActiveChannels() { return nActive; } // -------------------------------------------------- public AcqParams getAcqParams() { return ap; } // -------------------------------------------------- public int getNumberChannels() { if(vChannelProps==null) return 0; else return vChannelProps.length; } // -------------------------------------------------- /** Use acquisition parameter and channel property information maintained by this object to create a new image description string. This method is necessary because users may need to create an image description for this object after its acquisition and channel property information has been set. 
*/ public void initImageDescription() { if(ap==null || vChannelProps==null) setImageDesc(null); setImageDesc(props2ImageDesc()); } // -------------------------------------------------- public void setChannelProps(CProps[] acp) { vChannelProps = acp; // Reinitialize state data here because channel props have // changed. initState(); // Enable future changes to intensity mapping bounds to be // undone. initChannelUndo(); } // -------------------------------------------------- public void setChannelProps(CProps cp) { vChannelProps = new CProps[] { cp }; // Reinitialize state data because channel props have // changed. initState(); // Enable future changes to intensity mapping bounds to be // undone. initChannelUndo(); } // -------------------------------------------------- /** Allow changes to channel properties to be undone. Only a single level of undo is currently supported. */ public void initChannelUndo() { for(int n=0; n0) return true; else return false; } // -------------------------------------------------- /** Determine if input image is a valid FluoviewTiff_ object. This check first determines if input is an instance of FluoviewTiff_ type. If so, it then checks the image description field in the input image to ensure that it contains valid Fluoview version information. */ public static boolean isFluoviewImage(ImagePlus imp) { if(!(imp instanceof FluoviewTiff_)) return false; return hasFluoviewInfo(imp.getFileInfo().info); } // -------------------------------------------------- /** Creates displayable image based on composite flag. If display is not to show composited images, just behave like a standard ImagePlus object and return Image generated by associated ImageProcessor. Otherwise return a displayable image that is a composite of multiple channels. 
*/ protected Image createImage() { if(!composite) return ip.createImage(); else return compositeChannels(); } // -------------------------------------------------- /** Initialize values of pixels used to build composite image. */ private void initRGBPixels() { // Get pixels for each channel. short[][] vpixels = getImagePixels(); int number = getWidth()*getHeight(); if(rgbPixels==null) rgbPixels = new int[number]; int mask = 0xffff; for(int n=0; n255)?255:rvalue; gvalue = (gvalue>255)?255:gvalue; bvalue = (bvalue>255)?255:bvalue; rgbPixels[n] = 0xff << 24; rgbPixels[n] |= rvalue << 16; rgbPixels[n] |= gvalue << 8; rgbPixels[n] |= bvalue; } } // -------------------------------------------------- /** Initialize values of pixels used to build composite image. */ private void initRGBPixels(int slice) { // Get pixels for each channel. short[][] vpixels = getImagePixelsSlice(slice); int number = getWidth()*getHeight(); if(rgbPixels==null) rgbPixels = new int[number]; int mask = 0xffff; for(int n=0; n255)?255:rvalue; gvalue = (gvalue>255)?255:gvalue; bvalue = (bvalue>255)?255:bvalue; rgbPixels[n] = 0xff << 24; rgbPixels[n] |= rvalue << 16; rgbPixels[n] |= gvalue << 8; rgbPixels[n] |= bvalue; } } // -------------------------------------------------- /** Creates java.awt.Image object that is a composite of multiple channels. This is public so other routines can retrieve a composite image, even if this object is not currently configured to display composite images. */ public Image compositeChannels() { initRGBPixels(); return createCompositeImage(rgbPixels); } public Image compositeChannels(int slice) { initRGBPixels(slice); return createCompositeImage(rgbPixels); } // -------------------------------------------------- /** Does work of creating a RGB java.awt.Image object using input array of pixels. */ private Image createCompositeImage(int[] rgbPixels) { // Composite image into single RGBA image. 
ImageProducer p = new MemoryImageSource(getWidth(), getHeight(), rgbPixels, 0, // offset getWidth()); // scansize return Toolkit.getDefaultToolkit().createImage(p); } // -------------------------------------------------- /** Retrieve pixels associated with channel c (and currently active slice) from FluoviewTiff_ image. */ private short[] getImagePixels(int c) { // Number of composite images. int numImages = getStackSize()/getNumberChannels(); // Actual slice value. int slice = getSliceOffset() + c*numImages; // Retrieve pixels to process. if(slice==getCurrentSlice()) return (short[])ip.getPixels(); else return (short[])(getStack().getPixels(slice)); } private short[] getImagePixels(int c,int cslice) { // Number of composite images. int numImages = getStackSize()/getNumberChannels(); // Actual slice value. int slice = cslice + c*numImages; // Retrieve pixels to process. if(slice==getCurrentSlice()) return (short[])ip.getPixels(); else return (short[])(getStack().getPixels(slice)); } // -------------------------------------------------- /** Retrieve pixels from all channels associated with current slice. */ private short[][] getImagePixels() { short[][] spixels = new short[getNumberChannels()][]; for(int c=0; c=0) && (height>=0)) { if (getStackSize()>1) win = new FVStackWindow(this); else win = new ImageWindow(this); draw(); IJ.showStatus(""); } else IJ.write(""); } // -------------------------------------------------- /** Override ImagePlus.updateImage() to correctly update displayable image data in case of composite display. */ public void updateImage() { if(!composite) { super.updateImage(); } else { if (ip!=null) { //if (img==null) img = createImage(); //else { // img.flush(); // if (img.getSource()!=ip.getImageSource()) // img = createImage(); //} } } } // -------------------------------------------------- /** Override ImagePlus.getImage() to get correct displayable image in case of composite display. 
*/ public Image getImage() { if(!composite || getType()!=GRAY16) { img = null; super.getImage(); } else { // Return composited version if(img==null && ip!=null) createImage(); } return img; } // -------------------------------------------------- /** Override ImagePlus.setProcessor() to account for compositing behavior if activated. */ public void setProcessor(String title, ImageProcessor ip) { super.setProcessor(title,ip); if(composite) createImage(); } // -------------------------------------------------- /** Override ImagePlus.createImagePlus() to copy Fluoview specific information to new image. */ public ImagePlus createImagePlus() { FluoviewTiff_ imp2 = new FluoviewTiff_(); imp2.setCalibration(getCalibration()); if (vChannelProps==null) return imp2; // Set type of this image. imp2.setType(ImagePlus.GRAY16); // Copy scale information. imp2.copyScale(this); // Copy Fluoview specific information. imp2.setChannelProps(getChannelProps()); imp2.setAcqParams(getAcqParams()); imp2.setImageDesc(imageDescription); // Set channel property defaults for new image. imp2.initChannelUndo(); // Set composite properties for new image. imp2.setComposite(isComposite(),true); return imp2; } // -------------------------------------------------- /** Retrieve statistcs for slice from channel ch. Not really an override but pretty close. */ public ImageStatistics getStatistics(int ch) { int slice = channel2Slice(ch); if(IJ.debugMode==true) IJ.write("Retrieving statistics for slice: "+slice); ImageProcessor ip = getProcessor(); ShortProcessor sp = new ShortProcessor(ip.getWidth(), ip.getHeight(), true); //IJ.write(getStackSize()+" "+slice+" "+getCurrentSlice()); short[] pixels = (short[])getStack().getPixels(getCurrentSlice()); sp.setPixels(pixels); return new ShortStatistics(sp); } // // End ImagePlus overrides. // // ================================================== // -------------------------------------------------- /** Sort channel properties in order of increasing channel index. 
Use an insertion sort because number of channel props is typically small (1-5). */ private void orderChannelProps() { if(vChannelProps==null) return; for(int k=1; k0) && !inserted) { if(vChannelProps[i-1].index > vChannelProps[i].index) { // Swap the values. CProps tmp = vChannelProps[i-1]; vChannelProps[i-1] = vChannelProps[i]; vChannelProps[i] = tmp; i--; } else inserted = true; } } } // -------------------------------------------------- /** Returns number of composite images that can be created using stack data. */ public int getNumberComposites() { if(getNumberChannels()==0) return 0; int num = (int)Math.floor((double)getStackSize()/ (double)getNumberChannels()); return num; } // -------------------------------------------------- /** Activates particular composite image. */ public void setCompositeSlice(int c) { if(getStackSize()==1) return; if(c>=1 && c<=getNumberComposites()) { setSlice(c); } } // -------------------------------------------------- public void setChannelPropMin(int n, int min) { if(n < 0 || n >= getNumberChannels()) return; vChannelProps[n].setMin(min); colorMods[n].setLower(min,true); } public void setChannelPropMinMax(int n, int min, int max) { if(n<0 || n>=getNumberChannels()) return; vChannelProps[n].setMinAndMax(min,max); colorMods[n].setLowerAndUpper(min,max,true); } // -------------------------------------------------- public void setChannelPropMax(int n, int max) { if(n < 0 || n >= getNumberChannels()) return; vChannelProps[n].setMax(max); colorMods[n].setUpper(max,true); } // -------------------------------------------------- private void initActiveChannels() { activeChannels = null; activeChannels = new boolean[getNumberChannels()]; // All channels initially active. for(int n=0; n