
MeasureVolInterior.C

Go to the documentation of this file.
00001 /***************************************************************************
00002 *cr
00003 *cr (C) Copyright 1995-2019 The Board of Trustees of the
00004 *cr University of Illinois
00005 *cr All Rights Reserved
00006 *cr
00007 ***************************************************************************/
00008 
00009 /***************************************************************************
00010 * RCS INFORMATION:
00011 *
00012 * $RCSfile: MeasureVolInterior.C,v $
00013 * $Author: johns $ $Locker: $ $State: Exp $
00014 * $Revision: 1.17 $ $Date: 2020/12/13 07:41:55 $
00015 *
00016 ***************************************************************************
00017 * DESCRIPTION:
00018 * Method for measuring the interior volume of a vesicle or capsid from an 
00019 * externally provided simulated density map (e.g., from QuickSurf) and a 
00020 * threshold isovalue used for density-based inside/outside tests.
00021 * The approach counts interior and exterior voxels using parallel 
00022 * ray casting on an assumed orthorhombic grid.
00023 * Juan R. Perilla - 2018 
00024 * 
00025 ***************************************************************************/
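
As a rough orientation before the code, the following sketch (not part of this file) shows one way the routines defined below could be chained to turn a density map into an interior-volume estimate. The driver name, the six axis-aligned ray directions, the voxelVolume parameter, and the cleanup with delete are illustrative assumptions; the marker constants (PROTEINVOXEL, EXTERIORVOXEL) and the function prototypes are taken from MeasureVolInterior.h.

// Illustrative sketch only -- not part of MeasureVolInterior.C.
#include "VolumetricData.h"
#include "MeasureVolInterior.h"

static double estimate_interior_volume_sketch(const VolumetricData *densmap,
                                              float isovalue, double voxelVolume) {
  VolumetricData *target = CreateEmptyGrid(densmap);     // all voxels start unclassified (0)
  markIsoGrid(densmap, target, isovalue);                // voxels at/above the isovalue become protein
  float dirs[6][3] = { {1,0,0}, {-1,0,0}, {0,1,0}, {0,-1,0}, {0,0,1}, {0,0,-1} };
  for (int d = 0; d < 6; d++)                            // rays that escape the grid mark exterior voxels
    volin_threaded(densmap, target, isovalue, dirs[d]);
  long Ninterior = target->gridsize()
                 - countIsoGrids(target, EXTERIORVOXEL)  // voxels proven to be outside
                 - countIsoGrids(target, PROTEINVOXEL);  // voxels occupied by the density "wall"
  delete target;
  return Ninterior * voxelVolume;                        // voxel count -> physical volume
}

Casting along several directions makes the exterior classification more robust to concave surfaces, since a voxel that happens to face a wall along one direction may still escape the grid along another.
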
00026 
00027 #include <tcl.h>
00028 #include "TclCommands.h"
00029 #include "AtomSel.h"
00030 #include "VMDApp.h"
00031 #include "MoleculeList.h"
00032 #include "Molecule.h"
00033 #include "VolumetricData.h"
00034 #include "VolMapCreate.h"
00035 #include "QuickSurf.h"
00036 #include <math.h>
00037 #include "MeasureVolInterior.h"
00038 #include "utilities.h"
00039 
00040 #include <sstream>
00041 #include <string>
00042 
00043 #define MIN(X,Y) (((X)<(Y))? (X) : (Y))
00044 
00045 typedef struct{
00046 const VolumetricData *volmapA;
00047 VolumetricData *targetVol;
00048 VolumetricData *pmap;
00049 float *targetPmap;
00050 float *targetMap;
00051 const float *testMap;
00052 const int *numvoxels;
00053 float isovalue;
00054 wkf_mutex_t mtx;
00055 const float *rayDir;
00056 long Nout;
00057 } volinparms;
00058 
00059 
00060 static void * volinthread_prob(void *voidparms) {
00061 wkf_tasktile_t tile;
00062 volinparms *parms = NULL;
00063 wkf_threadlaunch_getdata(voidparms, (void **) &parms);
00064 
00065 int gx,gy,gz;
00066 long Nout=0l;
00067 while (wkf_threadlaunch_next_tile(voidparms, 16384, &tile) != WKF_SCHED_DONE) { 
00068 int myx;
00069 int xsize = parms->numvoxels[0];
00070 int ysize = parms->numvoxels[1];
00071 int xysize = ysize*xsize; // precompute to save math later
00072 for (myx=tile.start; myx<tile.end; myx++) {
00073 gz = myx / (xysize); // int divide/mod are SLOW (20-100+ clocks)
00074 gy = (myx % (xysize)) / xsize; // maybe make outer loop and inner loop
00075 gx = myx % xsize; // so we iterate over consecutive X...
00076 
00077 // skip protein voxels (-5.0f) 
00078 float voxelA = parms->targetMap[myx];
00079 if (voxelA != PROTEINVOXEL) {
00080 int coord[3] = {gx, gy, gz};
00081 int step[3];
00082 float rayOrig[3] = {gx + 0.5f, gy + 0.5f, gz + 0.5f};
00083 float deltaDist[3];
00084 float next[3];
00085 
00086 for (int ii = 0; ii < 3; ii++) {
00087 // XXX floating point division is slow, we should pre-compute the
00088 // rayDir[] / rayDir[..] fractions ahead of the main loop over voxels
00089 // in a separate tiny table, so we can just use them over and over 
00090 // without actually performing the divide, which is terribly slow on
00091 // most CPUs... Then the ternary expression just becomes a select or
00092 // conditional move instruction that is way way faster if in-cache.
00093 // Further, it may be possible to precompute deltaDist via table too,
00094 // rather than having to call sqrtf(), which is also very slow.
00095 const float x = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[0] / parms->rayDir[ii]) : parms->rayDir[0];
00096 const float y = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[1] / parms->rayDir[ii]) : parms->rayDir[1];
00097 const float z = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[2] / parms->rayDir[ii]) : parms->rayDir[2];
00098 deltaDist[ii] = sqrtf(x*x + y*y + z*z);
00099 if (parms->rayDir[ii] < 0.f) {
00100 step[ii] = -1;
00101 next[ii] = (rayOrig[ii] - coord[ii]) * deltaDist[ii];
00102 } else {
00103 step[ii] = 1;
00104 next[ii] = (coord[ii] + 1.f - rayOrig[ii]) * deltaDist[ii];
00105 }
00106 }
00107 
00108 int hit=0;
00109 while(hit==0) {
00110 // Perform DDA
00111 int side=0;
00112 for (int ii=1; ii < 3; ii++) {
00113 if (next[side] > next[ii]) {
00114 side=ii;
00115 }
00116 }
00117 next[side] += deltaDist[side];
00118 coord[side] += step[side];
00119 
00120 // Check if out of bounds
00121 if (coord[side] < 0 || coord[side] >= parms->numvoxels[side] ) {
00122 //parms->targetMap[myx]=EXTERIORVOXEL;
00123 Nout+=1l;
00124 break; //XXX
00125 }
00126 
00127 // Check if ray has hit a wall
00128 if (parms->testMap[coord[2]*xysize + coord[1]*xsize + coord[0]] >= parms->isovalue) {
00129 hit=1;
00130 parms->targetPmap[myx] += 1.0f;
00131 }
00132 } // wall detection loop
00133 }
00134 }
00135 }
00136 
00137 // mutex-protected accumulation of this thread's Nout into the shared total
00138 // XXX we should consider using atomic increments instead
00139 wkf_mutex_lock(&parms->mtx); 
00140 parms->Nout += Nout;
00141 wkf_mutex_unlock(&parms->mtx);
00142 
00143 return NULL;
00144 } // volinthread
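
The XXX comment inside the voxel loop above suggests hoisting the per-direction divides and sqrtf() calls out of the hot loop. A minimal sketch of that idea follows; the raytable_t type and precompute_ray_table() helper are hypothetical names, not VMD API, and the arithmetic simply mirrors the inner-loop code above.

#include <math.h>   // sqrtf

// Hypothetical per-direction table: everything here depends only on rayDir,
// so it can be computed once per ray direction instead of once per voxel.
typedef struct {
  float deltaDist[3];   // the sqrtf(x*x + y*y + z*z) term for each axis
  int   step[3];        // +1 / -1 grid step along each axis
} raytable_t;

static void precompute_ray_table(const float *rayDir, raytable_t *t) {
  for (int ii = 0; ii < 3; ii++) {
    // Same expressions as the per-voxel loop above, evaluated once per direction.
    const float x = (rayDir[ii] != 0.0f) ? (rayDir[0] / rayDir[ii]) : rayDir[0];
    const float y = (rayDir[ii] != 0.0f) ? (rayDir[1] / rayDir[ii]) : rayDir[1];
    const float z = (rayDir[ii] != 0.0f) ? (rayDir[2] / rayDir[ii]) : rayDir[2];
    t->deltaDist[ii] = sqrtf(x*x + y*y + z*z);
    t->step[ii] = (rayDir[ii] < 0.0f) ? -1 : 1;
  }
}

With such a table, only next[], which depends on the voxel's own coordinates, would still need to be initialized inside the per-voxel loop.
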
00145 
00146 
00147 static void * volinthread(void *voidparms) {
00148 wkf_tasktile_t tile;
00149 volinparms *parms = NULL;
00150 wkf_threadlaunch_getdata(voidparms, (void **) &parms);
00151 
00152 int gx,gy,gz;
00153 long Nout=0l;
00154 while (wkf_threadlaunch_next_tile(voidparms, 16384, &tile) != WKF_SCHED_DONE) { 
00155 int myx;
00156 int xsize = parms->numvoxels[0];
00157 int ysize = parms->numvoxels[1];
00158 int xysize = ysize*xsize; // precompute to save math later
00159 for (myx=tile.start; myx<tile.end; myx++) {
00160 gz = myx / (xysize); // int divide/mod are SLOW (20-100+ clocks)
00161 gy = (myx % (xysize)) / xsize; // maybe make outer loop and inner loop
00162 gx = myx % xsize; // so we iterate over consecutive X...
00163 
00164 // Find voxels inside, but skip voxels that we already know to be 
00165 // exterior from previous rounds (earlier ray directions)
00166 float voxelA = parms->targetMap[myx];
00167 if (voxelA != EXTERIORVOXEL) {
00168 int coord[3] = {gx, gy, gz};
00169 int step[3];
00170 float rayOrig[3] = {gx + 0.5f, gy + 0.5f, gz + 0.5f} ;
00171 float deltaDist[3];
00172 float next[3];
00173 
00174 for (int ii = 0; ii < 3; ii++) {
00175 // XXX floating point division is slow, we should pre-compute the
00176 // rayDir[] / rayDir[..] fractions ahead of the main loop over voxels
00177 // in a separate tiny table, so we can just use them over and over 
00178 // without actually performing the divide, which is terribly slow on
00179 // most CPUs... Then the ternary expression just becomes a select or
00180 // conditional move instruction that is way way faster if in-cache.
00181 // Further, it may be possible to precompute deltaDist via table too,
00182 // rather than having to call sqrtf(), which is also very slow.
00183 const float x = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[0] / parms->rayDir[ii]) : parms->rayDir[0];
00184 const float y = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[1] / parms->rayDir[ii]) : parms->rayDir[1];
00185 const float z = (parms->rayDir[ii] != 0.0f) ? (parms->rayDir[2] / parms->rayDir[ii]) : parms->rayDir[2];
00186 deltaDist[ii] = sqrtf(x*x + y*y + z*z);
00187 if (parms->rayDir[ii] < 0.f) {
00188 step[ii] = -1;
00189 next[ii] = (rayOrig[ii] - coord[ii]) * deltaDist[ii];
00190 } else {
00191 step[ii] = 1;
00192 next[ii] = (coord[ii] + 1.f - rayOrig[ii]) * deltaDist[ii];
00193 }
00194 }
00195 
00196 int hit=0;
00197 while(hit==0) {
00198 // Perform DDA
00199 int side=0;
00200 for (int ii=1; ii < 3; ii++) {
00201 if (next[side] > next[ii]) {
00202 side=ii;
00203 }
00204 }
00205 next[side] += deltaDist[side];
00206 coord[side] += step[side];
00207 
00208 // Check if out of bounds
00209 if (coord[side] < 0 || coord[side] >= parms->numvoxels[side] ) {
00210 parms->targetMap[myx]=EXTERIORVOXEL;
00211 Nout+=1l;
00212 break;
00213 }
00214 
00215 // Check if ray has hit a wall
00216 if (parms->testMap[coord[2]*xysize + coord[1]*xsize + coord[0]] >= parms->isovalue) {
00217 hit=1;
00218 }
00219 } // wall detection loop
00220 }
00221 }
00222 }
00223 
00224 // mutex-protected accumulation of this thread's Nout into the shared total
00225 // XXX we should consider using atomic increments instead
00226 wkf_mutex_lock(&parms->mtx); 
00227 parms->Nout += Nout;
00228 wkf_mutex_unlock(&parms->mtx);
00229 
00230 return NULL;
00231 } // volinthread
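
Both worker functions end with the same mutex-protected accumulation, and the XXX comments suggest atomic increments as an alternative. A sketch of that variant using C++11 std::atomic follows; the struct and function names are hypothetical, and the existing wkf_mutex_t approach is what the code as written actually uses.

#include <atomic>

struct nout_accumulator_sketch {
  std::atomic<long> Nout{0};     // shared total, no mutex required
};

static void accumulate_nout(nout_accumulator_sketch *acc, long localNout) {
  // One atomic add per worker thread at the end of its tile loop replaces the
  // wkf_mutex_lock()/add/wkf_mutex_unlock() sequence; since each thread touches
  // the shared counter only once, contention is negligible either way.
  acc->Nout.fetch_add(localNout, std::memory_order_relaxed);
}
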
00232 
00233 
00234 long volin_threaded_prob(const VolumetricData *volmapA, VolumetricData *targetVol, 
00235 VolumetricData *targetPvol, float _isovalue, float *rayDir) {
00236 volinparms parms;
00237 memset(&parms, 0, sizeof(parms));
00238 parms.testMap = volmapA->data;
00239 parms.targetMap = targetVol->data;
00240 parms.targetPmap = targetPvol->data;
00241 int numvoxels [] = {volmapA->xsize, volmapA->ysize, volmapA->zsize};
00242 parms.numvoxels = numvoxels;
00243 parms.targetVol = targetVol;
00244 parms.volmapA = volmapA;
00245 parms.isovalue = _isovalue;
00246 parms.rayDir = rayDir;
00247 parms.Nout=0;
00248 
00249 long Nout;
00250 int physprocs = wkf_thread_numprocessors();
00251 int maxprocs = physprocs;
00252 float *voltexmap = NULL;
00253 
00254 // We can productively use only a few cores per socket due to the
00255 // limited memory bandwidth per socket. Also, hyperthreading
00256 // actually hurts performance. These two considerations combined
00257 // with the linear increase in memory use prevent us from using large
00258 // numbers of cores with this simple approach, so if we've got more 
00259 // than 40 CPU cores, we iteratively cut the core count in 
00260 // half until we're at or below 40 cores.
00261 while (maxprocs > 40) 
00262 maxprocs /= 2;
00263 
00264 // Limit the number of CPU cores used so we don't run the 
00265 // machine out of memory during surface computation.
00266 // Use either a dynamic or hard-coded heuristic to limit the
00267 // number of CPU threads we will spawn so that we don't run
00268 // the machine out of memory. 
00269 long volsz = numvoxels[0] * numvoxels[1] * numvoxels[2];
00270 long volmemsz = sizeof(float) * volsz;
00271 long volmemszkb = volmemsz / 1024;
00272 long volmemtexszkb = volmemszkb + ((voltexmap != NULL) ? 3*volmemszkb : 0);
00273 
00274 // Platforms that don't have a means of determining available
00275 // physical memory will return -1, in which case we fall back to the
00276 // simple hard-coded 2GB-max-per-core heuristic.
00277 long vmdcorefree = -1;
00278 
00279 #if defined(ARCH_BLUEWATERS) || defined(ARCH_CRAY_XC) || defined(ARCH_CRAY_XK) || defined(ARCH_LINUXAMD64) || defined(ARCH_SOLARIS2_64) || defined(ARCH_SOLARISX86_64) || defined(ARCH_AIX6_64) || defined(ARCH_MACOSXARM64) || defined(ARCH_MACOSXX86_64) 
00280 // XXX The core-free query scheme has one weakness in that we might have a 
00281 // 32-bit version of VMD running on a 64-bit machine, where the available
00282 // physical memory may be much larger than is possible for a 
00283 // 32-bit VMD process to address. To do this properly we must therefore
00284 // use conditional compilation safety checks here until we have a better
00285 // way of determining this with a standardized helper routine.
00286 vmdcorefree = vmd_get_avail_physmem_mb();
00287 #endif
00288 
00289 if (vmdcorefree >= 0) {
00290 // Make sure this computation uses no more than a fraction of the free memory
00291 // as an upper bound alternative to the hard-coded heuristic.
00292 // This should be highly preferable to the fixed-size heuristic
00293 // we had used in all cases previously.
00294 while ((volmemtexszkb * maxprocs) > (1024*vmdcorefree/4)) {
00295 maxprocs /= 2;
00296 }
00297 } else {
00298 // Set a practical per-core maximum memory use limit to 2GB, for all cores
00299 while ((volmemtexszkb * maxprocs) > (2 * 1024 * 1024))
00300 maxprocs /= 2;
00301 }
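
As a worked illustration of the memory cap above (the grid size, thread count, and free-memory figure are all hypothetical):

// Example: a 512 x 512 x 512 float map
//   volmemszkb  = 512*512*512 * 4 / 1024             = 524288 KB (512 MB; voltexmap is NULL here)
//   vmdcorefree = 8192 MB  ->  budget = 1024*8192/4  = 2097152 KB (one quarter of free memory)
//   16 threads: 16 * 524288 = 8388608 KB > budget  -> halve to 8
//    8 threads:  8 * 524288 = 4194304 KB > budget  -> halve to 4
//    4 threads:  4 * 524288 = 2097152 KB = budget  -> loop exits with maxprocs = 4
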
00302 
00303 if (maxprocs < 1) 
00304 maxprocs = 1;
00305 int numprocs = maxprocs; // ever the optimist
00306 wkf_mutex_init(&parms.mtx);
00307 wkf_tasktile_t tile;
00308 tile.start = 0;
00309 tile.end = volsz;
00310 wkf_threadlaunch(numprocs, &parms, volinthread_prob, &tile);
00311 wkf_mutex_destroy(&parms.mtx);
00312 Nout=parms.Nout;
00313 
00314 return Nout;
00315 }
00316 
00317 
00318 long volin_threaded(const VolumetricData *volmapA, VolumetricData *targetVol, 
00319 float _isovalue, float *rayDir) {
00320 volinparms parms;
00321 memset(&parms, 0, sizeof(parms));
00322 parms.testMap = volmapA->data;
00323 parms.targetMap = targetVol->data;
00324 int numvoxels [] = {volmapA->xsize, volmapA->ysize, volmapA->zsize};
00325 parms.numvoxels = numvoxels;
00326 parms.targetVol = targetVol;
00327 parms.volmapA = volmapA;
00328 parms.isovalue = _isovalue;
00329 parms.rayDir = rayDir;
00330 parms.Nout=0;
00331 
00332 long Nout;
00333 int physprocs = wkf_thread_numprocessors();
00334 int maxprocs = physprocs;
00335 float *voltexmap = NULL;
00336 
00337 // We can productively use only a few cores per socket due to the
00338 // limited memory bandwidth per socket. Also, hyperthreading
00339 // actually hurts performance. These two considerations combined
00340 // with the linear increase in memory use prevent us from using large
00341 // numbers of cores with this simple approach, so if we've got more 
00342 // than 40 CPU cores, we iteratively cut the core count in 
00343 // half until we're at or below 40 cores.
00344 while (maxprocs > 40) 
00345 maxprocs /= 2;
00346 
00347 // Limit the number of CPU cores used so we don't run the 
00348 // machine out of memory during surface computation.
00349 // Use either a dynamic or hard-coded heuristic to limit the
00350 // number of CPU threads we will spawn so that we don't run
00351 // the machine out of memory. 
00352 long volsz = numvoxels[0] * numvoxels[1] * numvoxels[2];
00353 long volmemsz = sizeof(float) * volsz;
00354 long volmemszkb = volmemsz / 1024;
00355 long volmemtexszkb = volmemszkb + ((voltexmap != NULL) ? 3*volmemszkb : 0);
00356 
00357 // Platforms that don't have a means of determining available
00358 // physical memory will return -1, in which case we fall back to the
00359 // simple hard-coded 2GB-max-per-core heuristic.
00360 long vmdcorefree = -1;
00361 
00362 #if defined(ARCH_BLUEWATERS) || defined(ARCH_CRAY_XC) || defined(ARCH_CRAY_XK) || defined(ARCH_LINUXAMD64) || defined(ARCH_SOLARIS2_64) || defined(ARCH_SOLARISX86_64) || defined(ARCH_AIX6_64) || defined(ARCH_MACOSXARM64) || defined(ARCH_MACOSXX86_64) 
00363 // XXX The core-free query scheme has one weakness in that we might have a 
00364 // 32-bit version of VMD running on a 64-bit machine, where the available
00365 // physical memory may be much larger than is possible for a 
00366 // 32-bit VMD process to address. To do this properly we must therefore
00367 // use conditional compilation safety checks here until we have a better
00368 // way of determining this with a standardized helper routine.
00369 vmdcorefree = vmd_get_avail_physmem_mb();
00370 #endif
00371 
00372 if (vmdcorefree >= 0) {
00373 // Make sure this computation uses no more than a fraction of the free memory
00374 // as an upper bound alternative to the hard-coded heuristic.
00375 // This should be highly preferable to the fixed-size heuristic
00376 // we had used in all cases previously.
00377 while ((volmemtexszkb * maxprocs) > (1024*vmdcorefree/4)) {
00378 maxprocs /= 2;
00379 }
00380 } else {
00381 // Set a practical per-core maximum memory use limit to 2GB, for all cores
00382 while ((volmemtexszkb * maxprocs) > (2 * 1024 * 1024))
00383 maxprocs /= 2;
00384 }
00385 
00386 if (maxprocs < 1) 
00387 maxprocs = 1;
00388 int numprocs = maxprocs; // ever the optimist
00389 wkf_mutex_init(&parms.mtx);
00390 wkf_tasktile_t tile;
00391 tile.start = 0;
00392 tile.end = volsz;
00393 wkf_threadlaunch(numprocs, &parms, volinthread, &tile);
00394 wkf_mutex_destroy(&parms.mtx);
00395 Nout=parms.Nout;
00396 
00397 return Nout;
00398 }
00399 
00400 
00401 VolumetricData* CreateEmptyGrid(const VolumetricData *volmapA) {
00402 VolumetricData *targetVol;
00403 float *volmap;
00404 // Calculate dimension of the grid
00405 long volsz=volmapA->xsize * volmapA->ysize * volmapA->zsize;
00406 // Allocate new grid
00407 volmap = new float[volsz];
00408 // Initialize new grid to 0
00409 memset(volmap, 0, sizeof(float)*volsz); 
00410 
00411 // Set up the VolumetricData object
00412 targetVol = new VolumetricData("inout map", volmapA->origin, 
00413 volmapA->xaxis, volmapA->yaxis, volmapA->zaxis,
00414 volmapA->xsize, volmapA->ysize, volmapA->zsize,
00415 volmap);
00416 return targetVol;
00417 }
00418 
00419 
00420 VolumetricData* CreateProbGrid(const VolumetricData *volmapA) {
00421 VolumetricData *targetPvol;
00422 float *volmap;
00423 // Calculate dimension of the grid
00424 long volsz=volmapA->xsize * volmapA->ysize * volmapA->zsize;
00425 // Allocate new grid
00426 volmap = new float[volsz];
00427 // Initialize new grid to 0
00428 memset(volmap, 0, sizeof(float)*volsz); 
00429 
00430 // Set up the VolumetricData object
00431 targetPvol = new VolumetricData("P(in) map", volmapA->origin, 
00432 volmapA->xaxis, volmapA->yaxis, volmapA->zaxis,
00433 volmapA->xsize, volmapA->ysize, volmapA->zsize,
00434 volmap);
00435 return targetPvol;
00436 }
00437 
00438 
00439 VolumetricData* normalize_pmap(const VolumetricData *volmapA, int nrays) {
00440 long gridsz = volmapA->gridsize();
00441 const float* map = volmapA->data;
00442 float *volmap;
00443 
00444 VolumetricData *targetPmap;
00445 
00446 long volsz = volmapA->xsize * volmapA->ysize * volmapA->zsize;
00447 volmap = new float[volsz];
00448 memset(volmap, 0, sizeof(float)*volsz);
00449 
00450 float nrays_inv = 1.0f / float(nrays);
00451 for (long l=0; l<gridsz; l++) {
00452 volmap[l] = map[l] * nrays_inv;
00453 }
00454 
00455 targetPmap = new VolumetricData("P(in) map", volmapA->origin,
00456 volmapA->xaxis, volmapA->yaxis, volmapA->zaxis,
00457 volmapA->xsize, volmapA->ysize, volmapA->zsize,
00458 volmap);
00459 return targetPmap;
00460 }
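
For a quick numeric check of this normalization (the hit counts are hypothetical):

// With nrays = 64 casting rounds, a voxel whose rays hit a wall in 60 of them
// ends up with P(in) = 60 * (1/64) = 0.9375, while a voxel whose rays mostly
// escaped the grid, say 6 hits, ends up with P(in) = 6 * (1/64) = 0.09375.
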
00461 
00462 
00463 void VolIn_CleanGrid(VolumetricData *volmapA) {
00464 long volsz=volmapA->xsize * volmapA->ysize * volmapA->zsize;
00465 memset(volmapA->data, 0, sizeof(float)*volsz);
00466 }
00467 
00468 
00469 long countIsoGrids(const VolumetricData *volmapA, const float _isovalue) {
00470 long volInIso=0;
00471 long gridsize = volmapA->gridsize();
00472 const float* map=volmapA->data;
00473 
00474 for (long l=0; l<gridsize; l++) {
00475 if (map[l] == _isovalue) volInIso += 1;
00476 }
00477 
00478 return volInIso;
00479 }
00480 
00481 
00482 long markIsoGrid(const VolumetricData *volmapA, VolumetricData *targetVol, 
00483 const float _isovalue) {
00484 long nVoxels=0;
00485 
00486 long gridsize = volmapA->gridsize();
00487 const float *map=volmapA->data;
00488 float* targetmap=targetVol->data;
00489 
00490 for (long l=0; l<gridsize; l++) {
00491 if (map[l] >= _isovalue) {
00492 targetmap[l]=PROTEINVOXEL;
00493 nVoxels+=1;
00494 }
00495 }
00496 
00497 return nVoxels;
00498 }
00499 
00500 
00501 long RaycastGrid(const VolumetricData *volmapA, VolumetricData *targetVol,
00502 float _isovalue, float *rayDir) {
00503 int volDimens[3]; // Volume dimensions
00504 
00505 // Calculate dimension of the grid
00506 float *map=targetVol->data;
00507 volDimens[0]=volmapA->xsize;
00508 volDimens[1]=volmapA->ysize; 
00509 volDimens[2]=volmapA->zsize;
00510 
00511 float isovalue=_isovalue; // Set isovalue for wall hit detection
00512 
00513 // Ray moves in gridspace 
00514 for (int k=0; k < volDimens[2]; k++) {
00515 for (int j=0; j < volDimens[1]; j++) {
00516 long voxrowaddr = k*volDimens[0]*volDimens[1] + j*volDimens[0];
00517 for (int i=0; i < volDimens[0]; i++) {
00518 long voxeladdr = voxrowaddr + i;
00519 if (map[voxeladdr] != EXTERIORVOXEL) {
00520 float deltaDist[3];
00521 float next[3];
00522 int coord[3] = { i,j,k };
00523 int step[3];
00524 float rayOrig[3] = { i + 0.5f , j + 0.5f , k + 0.5f } ;
00525 
00526 for (int ii = 0; ii < 3; ii++) {
00527 const float x = (rayDir[0] / rayDir[ii]);
00528 const float y = (rayDir[1] / rayDir[ii]);
00529 const float z = (rayDir[2] / rayDir[ii]);
00530 deltaDist[ii] = sqrtf(x*x + y*y + z*z);
00531 if (rayDir[ii] < 0.f) {
00532 step[ii] = -1;
00533 next[ii] = (rayOrig[ii] - coord[ii]) * deltaDist[ii];
00534 } else {
00535 step[ii] = 1;
00536 next[ii] = (coord[ii] + 1.f - rayOrig[ii]) * deltaDist[ii];
00537 }
00538 }
00539 
00540 int hit=0;
00541 while (hit==0) {
00542 // Perform DDA
00543 int side=0;
00544 for (int ii=1; ii<3; ii++) {
00545 if (next[side] > next[ii]) {
00546 side=ii;
00547 }
00548 }
00549 next[side] += deltaDist[side];
00550 coord[side] += step[side];
00551 
00552 // Check if out of bounds
00553 if (coord[side] < 0 || coord[side] >= volDimens[side] ) {
00554 map[voxeladdr]=EXTERIORVOXEL;
00555 break;
00556 }
00557 
00558 // Check if ray has hit a wall
00559 if (volmapA->data[coord[2]*volDimens[0]*volDimens[1] + coord[1]*volDimens[0] + coord[0]] > isovalue) {
00560 hit=1;
00561 }
00562 } // wall detection loop
00563 } // conditional
00564 } // loop over i
00565 } // loop over j
00566 } // loop over k
00567 
00568 return 0;
00569 }
00570 
00571 
00572 VolumetricData* process_pmap (const VolumetricData *pmap, float cutoff) {
00573 long gridsz=pmap->gridsize();
00574 const float* map=pmap->data;
00575 float *volmap;
00576 
00577 VolumetricData *targetPmap;
00578 
00579 long volsz = pmap->xsize*pmap->ysize*pmap->zsize;
00580 volmap = new float[volsz];
00581 memset(volmap, 0, sizeof(float)*volsz);
00582 
00583 for (long l=0; l<gridsz; l++) {
00584 if (map[l]==0.0f) {
00585 volmap[l]=PROTEINVOXEL;
00586 } else if ((map[l]-cutoff) >= VOLMAPTOLERANCE) {
00587 volmap[l]=INTERIORVOXEL;
00588 } else if ((map[l]-cutoff) <= VOLMAPTOLERANCE) {
00589 volmap[l]=EXTERIORVOXEL;
00590 }
00591 }
00592 
00593 targetPmap = new VolumetricData("ProcMap", pmap->origin,
00594 pmap->xaxis, pmap->yaxis, pmap->zaxis,
00595 pmap->xsize, pmap->ysize, pmap->zsize,
00596 volmap);
00597 
00598 return targetPmap;
00599 }
00600 
00601 
00602 long vol_probability(const VolumetricData* probmap, float cutoff, float gspace) {
00603 long Nvox=0;
00604 long gridsz=probmap->gridsize();
00605 const float* map=probmap->data;
00606 
00607 for (long l=0; l<gridsz; l++) {
00608 if (map[l] >= cutoff) {
00609 Nvox++;
00610 }
00611 }
00612 return Nvox;
00613 }
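
CreateProbGrid(), volin_threaded_prob(), normalize_pmap(), and vol_probability() together support a probability-based classification: each casting round increments a voxel's count when its ray hits the density wall, and the normalized count is then thresholded. The sketch below shows one hypothetical way to chain them; the driver name, the number of rounds, and the crude direction generator are illustrative assumptions rather than the code path of the VMD command that actually drives these routines.

// Illustrative sketch only -- not part of MeasureVolInterior.C.
#include <math.h>   // cosf, sinf
#include "VolumetricData.h"
#include "MeasureVolInterior.h"

static long count_interior_by_probability_sketch(const VolumetricData *densmap,
                                                 float isovalue, float cutoff,
                                                 int nrays) {
  VolumetricData *target = CreateEmptyGrid(densmap);     // per-voxel classification grid
  VolumetricData *pmap   = CreateProbGrid(densmap);      // per-voxel hit counts
  markIsoGrid(densmap, target, isovalue);                // protein voxels are skipped by the workers
  for (int r = 0; r < nrays; r++) {
    // Crude illustrative direction set; a real driver would use a proper
    // spherical distribution of ray directions.
    float theta = 6.2831853f * r / nrays;
    float dir[3] = { cosf(theta), sinf(theta), 0.3f };
    volin_threaded_prob(densmap, target, pmap, isovalue, dir);
  }
  VolumetricData *prob = normalize_pmap(pmap, nrays);    // hit counts -> P(in) in [0,1]
  long Ninterior = vol_probability(prob, cutoff, 0.0f);  // voxels with P(in) >= cutoff;
                                                         // gspace is unused by vol_probability() as written
  delete target;
  delete pmap;
  delete prob;
  return Ninterior;
}
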
00614 
00615 
00616 bool isfloat(char* opt) {
00617 std::istringstream iss(opt);
00618 float f;
00619 iss >> std::noskipws >> f;
00620 return iss.eof() && !iss.fail();
00621 }
00622 
00623 
