Update from SM-G950F_OO_Opensource_kernel

This commit is contained in:
Andreas Schneider 2018-02-12 19:25:18 +01:00
parent 8e7bcf4240
commit 0490ece85e
27 changed files with 2182 additions and 1620 deletions

12
Kconfig
View File

@ -6,7 +6,7 @@ config SDFAT_FS
select NLS_CODEPAGE_437
select NLS_ISO8859_1
help
If you want to use the sdFAT file system, then you must say Y or M
If you want to use the sdFAT file system, then you must say Y or M
here to inlucde sdFAT support.
sdFAT is unified FAT-based file system which supports not only fat12/
16/32 with vfat but also exfat. sdFAT supports winnt short-name rule.
@ -21,12 +21,15 @@ config SDFAT_DELAYED_META_DIRTY
depends on SDFAT_FS
help
If you enable this feature, metadata(FAT/Directory entry) is updated
by flush thread.
by flush thread.
config SDFAT_SUPPORT_DIR_SYNC
bool "Enable supporting dir sync"
default n
depends on SDFAT_FS
help
If you enable this feature, the modification for directory operation
is written to a storage at once.
config SDFAT_DEFAULT_CODEPAGE
int "Default codepage for sdFAT"
@ -93,3 +96,8 @@ config SDFAT_DBG_BUGON
bool "enable strict BUG_ON() for debugging"
depends on SDFAT_FS && SDFAT_DEBUG
default n
config SDFAT_STATISTICS
bool "enable statistics for bigdata"
depends on SDFAT_FS
default y

View File

@ -5,10 +5,11 @@
obj-$(CONFIG_SDFAT_FS) += sdfat_fs.o
sdfat_fs-objs := sdfat.o core.o core_fat.o core_exfat.o api.o blkdev.o \
fatent.o amap_smart.o cache.o dfr.o nls.o misc.o xattr.o \
fatent.o amap_smart.o cache.o dfr.o nls.o misc.o \
mpage.o extent.o
sdfat_fs-$(CONFIG_SDFAT_VIRTUAL_XATTR) += xattr.o
sdfat_fs-$(CONFIG_SDFAT_STATISTICS) += statistics.o
all:

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -85,7 +83,7 @@ static inline int amap_remove_from_list(AU_INFO_T *au, struct slist_head *shead)
}
/* Full-linear serach => Find AU with max. number of fclu */
static inline AU_INFO_T* amap_find_hot_au_largest(struct slist_head *shead)
static inline AU_INFO_T *amap_find_hot_au_largest(struct slist_head *shead)
{
struct slist_head *iter;
uint16_t max_fclu = 0;
@ -98,7 +96,7 @@ static inline AU_INFO_T* amap_find_hot_au_largest(struct slist_head *shead)
while (iter) {
entry = list_entry(iter, AU_INFO_T, shead);
if (entry->free_clusters > max_fclu) {
max_fclu = entry->free_clusters;
ret = entry;
@ -111,8 +109,9 @@ static inline AU_INFO_T* amap_find_hot_au_largest(struct slist_head *shead)
}
/* Find partially used AU with max. number of fclu.
If there is no partial AU available, pick a clean one */
static inline AU_INFO_T* amap_find_hot_au_partial(AMAP_T *amap)
* If there is no partial AU available, pick a clean one
*/
static inline AU_INFO_T *amap_find_hot_au_partial(AMAP_T *amap)
{
struct slist_head *iter;
uint16_t max_fclu = 0;
@ -126,7 +125,7 @@ static inline AU_INFO_T* amap_find_hot_au_partial(AMAP_T *amap)
while (iter) {
entry = list_entry(iter, AU_INFO_T, shead);
if (entry->free_clusters > max_fclu) {
if (entry->free_clusters < amap->clusters_per_au) {
max_fclu = entry->free_clusters;
@ -147,13 +146,13 @@ static inline AU_INFO_T* amap_find_hot_au_partial(AMAP_T *amap)
/*
Size-base AU management functions
*/
* Size-base AU management functions
*/
/*
Add au into cold AU MAP
au: an isolated (not in a list) AU data structure
*/
* Add au into cold AU MAP
* au: an isolated (not in a list) AU data structure
*/
int amap_add_cold_au(AMAP_T *amap, AU_INFO_T *au)
{
FCLU_NODE_T *fclu_node = NULL;
@ -179,9 +178,9 @@ int amap_add_cold_au(AMAP_T *amap, AU_INFO_T *au)
}
/*
Remove an AU from AU MAP
*/
int amap_remove_cold_au(AMAP_T *amap, AU_INFO_T* au)
* Remove an AU from AU MAP
*/
int amap_remove_cold_au(AMAP_T *amap, AU_INFO_T *au)
{
struct list_head *prev = au->head.prev;
@ -198,13 +197,13 @@ int amap_remove_cold_au(AMAP_T *amap, AU_INFO_T* au)
}
/* "Find" best fit AU
returns NULL if there is no AU w/ enough free space.
This function doesn't change AU status.
The caller should call amap_remove_cold_au() if needed.
*/
AU_INFO_T* amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
/* "Find" best fit AU
* returns NULL if there is no AU w/ enough free space.
*
* This function doesn't change AU status.
* The caller should call amap_remove_cold_au() if needed.
*/
AU_INFO_T *amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
{
AU_INFO_T *au = NULL;
FCLU_NODE_T *fclu_iter;
@ -214,7 +213,7 @@ AU_INFO_T* amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
free_clusters);
return NULL;
}
fclu_iter = NODE(free_clusters, amap);
if (amap->fclu_hint < free_clusters) {
@ -243,14 +242,15 @@ AU_INFO_T* amap_find_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
}
/* "Pop" best fit AU
returns NULL if there is no AU w/ enough free space.
The returned AU will not be in the list anymore.
*/
AU_INFO_T* amap_pop_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
/* "Pop" best fit AU
*
* returns NULL if there is no AU w/ enough free space.
* The returned AU will not be in the list anymore.
*/
AU_INFO_T *amap_pop_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
{
/* Naive implementation */
AU_INFO_T* au;
AU_INFO_T *au;
au = amap_find_cold_au_bestfit(amap, free_clusters);
if (au)
@ -262,15 +262,14 @@ AU_INFO_T* amap_pop_cold_au_bestfit(AMAP_T *amap, uint16_t free_clusters)
/* Pop the AU with the largest free space
search from 'start_fclu' to 0
(target freecluster : -1 for each step)
start_fclu = 0 means to search from the max. value
*/
AU_INFO_T* amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
*
* search from 'start_fclu' to 0
* (target freecluster : -1 for each step)
* start_fclu = 0 means to search from the max. value
*/
AU_INFO_T *amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
{
AU_INFO_T* au = NULL;
AU_INFO_T *au = NULL;
FCLU_NODE_T *fclu_iter;
if (!start_fclu)
@ -283,7 +282,7 @@ AU_INFO_T* amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
fclu_iter = NODE(amap->fclu_hint, amap);
else
fclu_iter = NODE(start_fclu, amap);
/* Naive Hash management */
do {
if (!list_empty(&fclu_iter->head)) {
@ -311,10 +310,10 @@ AU_INFO_T* amap_pop_cold_au_largest(AMAP_T *amap, uint16_t start_fclu)
/*
===============================================
Allocation Map related functions
===============================================
*/
* ===============================================
* Allocation Map related functions
* ===============================================
*/
/* Create AMAP related data structure (mount time) */
int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hidden_sect)
@ -350,7 +349,7 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
sdfat_msg(sb, KERN_ERR,
"invalid AU size (sect_per_au : %u, "
"sect_per_clus : %u) "
"please re-format for performance.",
"please re-format for performance.",
sect_per_au, fsi->sect_per_clus);
return -EINVAL;
}
@ -360,7 +359,7 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
sdfat_msg(sb, KERN_ERR,
"misaligned part (start sect : %u, "
"sect_per_clus : %u) "
"please re-format for performance.",
"please re-format for performance.",
misaligned_sect, fsi->sect_per_clus);
return -EINVAL;
}
@ -370,7 +369,7 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
sdfat_msg(sb, KERN_ERR,
"misaligned data area (start sect : %u, "
"sect_per_clus : %u) "
"please re-format for performance.",
"please re-format for performance.",
fsi->data_start_sector, fsi->sect_per_clus);
return -EINVAL;
}
@ -383,22 +382,23 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
return -ENOMEM;
amap->sb = sb;
amap->n_au = (fsi->num_sectors + misaligned_sect + sect_per_au - 1) / sect_per_au;
amap->n_clean_au = 0;
amap->n_full_au = 0;
/* Reflect block-partition align first,
then partition-data_start align */
/* Reflect block-partition align first,
* then partition-data_start align
*/
amap->clu_align_bias = (misaligned_sect / fsi->sect_per_clus);
amap->clu_align_bias += (fsi->data_start_sector >> fsi->sect_per_clus_bits) - CLUS_BASE;
amap->clusters_per_au = sect_per_au / fsi->sect_per_clus;
/* That is,
/* That is,
* the size of cluster is at least 4KB if the size of AU is 4MB
*/
if (amap->clusters_per_au > MAX_CLU_PER_AU) {
sdfat_log_msg(sb, KERN_INFO,
sdfat_log_msg(sb, KERN_INFO,
"too many clusters per AU (clus/au:%d > %d).",
amap->clusters_per_au,
MAX_CLU_PER_AU);
@ -417,7 +417,6 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
/* Allocate AU info table */
n_au_table = (amap->n_au + N_AU_PER_TABLE - 1) / N_AU_PER_TABLE;
amap->au_table = kmalloc(sizeof(AU_INFO_T *) * n_au_table, GFP_NOIO);
if (!amap->au_table) {
sdfat_msg(sb, KERN_ERR,
"failed to alloc amap->au_table\n");
@ -439,9 +438,9 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
(unsigned long)sizeof(FCLU_NODE_T));
if (!amap->fclu_order)
amap->fclu_nodes = (FCLU_NODE_T*)get_zeroed_page(GFP_NOIO);
else
amap->fclu_nodes = (FCLU_NODE_T*)vzalloc(PAGE_SIZE << amap->fclu_order);
amap->fclu_nodes = (FCLU_NODE_T *)get_zeroed_page(GFP_NOIO);
else
amap->fclu_nodes = vzalloc(PAGE_SIZE << amap->fclu_order);
amap->fclu_hint = amap->clusters_per_au;
@ -468,16 +467,15 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
for (i = 0; i < amap->clusters_per_au; i++)
INIT_LIST_HEAD(&amap->fclu_nodes[i].head);
/*
Thanks to kzalloc()
amap->entries[i_au].free_clusters = 0;
amap->entries[i_au].head.prev = NULL;
amap->entries[i_au].head.next = NULL;
*/
* Thanks to kzalloc()
* amap->entries[i_au].free_clusters = 0;
* amap->entries[i_au].head.prev = NULL;
* amap->entries[i_au].head.next = NULL;
*/
/* Parse FAT table */
for (i_clu = CLUS_BASE; i_clu < fsi->num_clusters; i_clu++){
for (i_clu = CLUS_BASE; i_clu < fsi->num_clusters; i_clu++) {
u32 clu_data;
AU_INFO_T *au;
@ -486,7 +484,7 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
"failed to read fat entry(%u)\n", i_clu);
goto free_and_eio;
}
if (IS_CLUS_FREE(clu_data)) {
au = GET_AU(amap, i_AU_of_CLU(amap, i_clu));
au->free_clusters++;
@ -495,7 +493,7 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
}
/* Build AU list */
for (i_au = 0; i_au < amap->n_au; i_au++){
for (i_au = 0; i_au < amap->n_au; i_au++) {
AU_INFO_T *au = GET_AU(amap, i_au);
au->idx = i_au;
@ -522,13 +520,12 @@ int amap_create(struct super_block *sb, u32 pack_ratio, u32 sect_per_au, u32 hid
amap->total_fclu_hot += GET_AU(amap, i_au_root)->free_clusters;
}
fsi->amap = amap;
fsi->used_clusters = total_used_clusters;
sdfat_msg(sb, KERN_INFO,
sdfat_msg(sb, KERN_INFO,
"AMAP: Smart allocation enabled (opt : %u / %u / %u)",
amap->option.au_size, amap->option.au_align_factor,
amap->option.au_size, amap->option.au_align_factor,
amap->option.packing_ratio);
/* Debug purpose - check */
@ -575,6 +572,7 @@ void amap_destroy(struct super_block *sb)
if (amap->au_table) {
int i;
for (i = 0; i < n_au_table; i++)
free_page((unsigned long)amap->au_table[i]);
@ -586,16 +584,14 @@ void amap_destroy(struct super_block *sb)
vfree(amap->fclu_nodes);
kfree(amap);
SDFAT_SB(sb)->fsi.amap = NULL;
return;
}
/*
Check status of FS
and change destination if needed to disable AU-aligned alloc.
(from ALLOC_COLD_ALIGNED to ALLOC_COLD_SEQ)
*/
* Check status of FS
* and change destination if needed to disable AU-aligned alloc.
* (from ALLOC_COLD_ALIGNED to ALLOC_COLD_SEQ)
*/
static inline int amap_update_dest(AMAP_T *amap, int ori_dest)
{
FS_INFO_T *fsi = &(SDFAT_SB(amap->sb)->fsi);
@ -609,23 +605,22 @@ static inline int amap_update_dest(AMAP_T *amap, int ori_dest)
n_partial_freeclus = fsi->num_clusters - fsi->used_clusters -
amap->clusters_per_au * amap->n_clean_au;
/* Status of AUs : Full / Partial / Clean
If there are many partial (and badly fragmented) AUs,
the throughput will decrease extremly.
/* Status of AUs : Full / Partial / Clean
* If there are many partial (and badly fragmented) AUs,
* the throughput will decrease extremly.
*
* The follow code will treat those worst cases.
*/
The follow code will treat those worst cases.
*/
// XXX: AMAP heuristics
/* XXX: AMAP heuristics */
if ((amap->n_clean_au * 50 <= amap->n_au) &&
(n_partial_freeclus*2) < (n_partial_au*amap->clusters_per_au)) {
/* If clean AUs are fewer than 2% of n_au (80 AUs per 16GB)
and fragment ratio is more than 2 (AVG free_clusters=half AU)
disable clean-first allocation
enable VFAT-like sequential allocation
*/
* and fragment ratio is more than 2 (AVG free_clusters=half AU)
*
* disable clean-first allocation
* enable VFAT-like sequential allocation
*/
return ALLOC_COLD_SEQ;
}
@ -635,39 +630,37 @@ static inline int amap_update_dest(AMAP_T *amap, int ori_dest)
#define PACKING_SOFTLIMIT (amap->option.packing_ratio)
#define PACKING_HARDLIMIT (amap->option.packing_ratio * 4)
/*
Pick a packing AU if needed.
Otherwise just return NULL
This function includes some heuristics.
*/
static inline AU_INFO_T* amap_get_packing_au(AMAP_T *amap, int dest, int num_to_wb, int *clu_to_skip)
/*
* Pick a packing AU if needed.
* Otherwise just return NULL
*
* This function includes some heuristics.
*/
static inline AU_INFO_T *amap_get_packing_au(AMAP_T *amap, int dest, int num_to_wb, int *clu_to_skip)
{
AU_INFO_T* au = NULL;
AU_INFO_T *au = NULL;
if (dest == ALLOC_COLD_PACKING) {
/* ALLOC_COLD_PACKING:
Packing-first mode for defrag.
Optimized to save clean AU
1) best-fit AU
2) Smallest AU (w/ minimum free clusters)
*/
/* ALLOC_COLD_PACKING:
* Packing-first mode for defrag.
* Optimized to save clean AU
*
* 1) best-fit AU
* 2) Smallest AU (w/ minimum free clusters)
*/
if (num_to_wb >= amap->clusters_per_au)
num_to_wb = num_to_wb % amap->clusters_per_au;
num_to_wb = num_to_wb % amap->clusters_per_au;
/* 이거 주석처리하면, AU size 딱 맞을때는 clean, 나머지는 작은거부터 */
if (num_to_wb == 0)
num_to_wb = 1; // Don't use clean AUs
au = amap_find_cold_au_bestfit(amap, num_to_wb);
au = amap_find_cold_au_bestfit(amap, num_to_wb);
if (au && au->free_clusters == amap->clusters_per_au && num_to_wb > 1) {
// if au is clean then get a new partial one
/* if au is clean then get a new partial one */
au = amap_find_cold_au_bestfit(amap, 1);
}
if (au) {
amap->n_need_packing = 0;
amap_remove_cold_au(amap, au);
@ -676,28 +669,27 @@ static inline AU_INFO_T* amap_get_packing_au(AMAP_T *amap, int dest, int num_to_
}
/* Heuristic packing:
This will improve QoS greatly.
Count # of AU_ALLIGNED allocation.
If the number exceeds the specific threshold,
allocate on a partial AU or generate random I/O.
*/
if ((PACKING_SOFTLIMIT > 0) && \
(amap->n_need_packing >= PACKING_SOFTLIMIT) && \
(num_to_wb < (int)amap->clusters_per_au) ){
/* Heuristic packing:
* This will improve QoS greatly.
*
* Count # of AU_ALIGNED allocation.
* If the number exceeds the specific threshold,
* allocate on a partial AU or generate random I/O.
*/
if ((PACKING_SOFTLIMIT > 0) &&
(amap->n_need_packing >= PACKING_SOFTLIMIT) &&
(num_to_wb < (int)amap->clusters_per_au)) {
/* Best-fit packing:
If num_to_wb (expected number to be allocated) is smaller than AU_SIZE,
find a best-fit AU.
*/
* If num_to_wb (expected number to be allocated) is smaller
* than AU_SIZE, find a best-fit AU.
*/
// Back margin (heuristics)
/* Back margin (heuristics) */
if (num_to_wb < amap->clusters_per_au / 4)
num_to_wb = amap->clusters_per_au / 4;
au = amap_find_cold_au_bestfit(amap, num_to_wb);
if ((au != NULL)) {
if (au != NULL) {
amap_remove_cold_au(amap, au);
MMSG("AMAP: packing (cnt: %d) / softlimit, "
@ -712,16 +704,14 @@ static inline AU_INFO_T* amap_get_packing_au(AMAP_T *amap, int dest, int num_to_
return au;
}
}
if (PACKING_HARDLIMIT != 0 && \
amap->n_need_packing >= PACKING_HARDLIMIT) {
/* Compulsory SLC flushing:
If there was no chance to do best-fit packing
and the # of AU-aligned allocation exceeds HARD threshold,
then pick a clean AU and generate a compulsory random I/O.
*/
au = amap_pop_cold_au_largest(amap, amap->clusters_per_au);
if ((PACKING_HARDLIMIT) && amap->n_need_packing >= PACKING_HARDLIMIT) {
/* Compulsory SLC flushing:
* If there was no chance to do best-fit packing
* and the # of AU-aligned allocation exceeds HARD threshold,
* then pick a clean AU and generate a compulsory random I/O.
*/
au = amap_pop_cold_au_largest(amap, amap->clusters_per_au);
if (au) {
MMSG("AMAP: packing (cnt: %d) / hard-limit, largest)\n",
amap->n_need_packing);
@ -741,9 +731,10 @@ static inline AU_INFO_T* amap_get_packing_au(AMAP_T *amap, int dest, int num_to_
}
/* Pick a target AU
- This function should be called only if there are one or more free clusters in the bdev.
*/
/* Pick a target AU:
* This function should be called
* only if there are one or more free clusters in the bdev.
*/
TARGET_AU_T *amap_get_target_au(AMAP_T *amap, int dest, int num_to_wb)
{
int loop_count = 0;
@ -751,10 +742,9 @@ TARGET_AU_T *amap_get_target_au(AMAP_T *amap, int dest, int num_to_wb)
retry:
if (++loop_count >= 3) {
/* No space available (or AMAP consistency error)
This could happen because of the ignored AUs
but not likely
(because the defrag daemon will not work if there is no enough space)
*/
* This could happen because of the ignored AUs but not likely
* (because the defrag daemon will not work if there is no enough space)
*/
BUG_ON(amap->slist_ignored.next == NULL);
return NULL;
}
@ -787,13 +777,12 @@ retry:
return &amap->cur_hot;
}
/* Cold allocation:
If amap->cur_cold.au has one or more free cluster(s),
then just return amap->cur_cold
*/
if ( (!amap->cur_cold.au) \
|| (amap->cur_cold.idx == amap->clusters_per_au) \
* If amap->cur_cold.au has one or more free cluster(s),
* then just return amap->cur_cold
*/
if ((!amap->cur_cold.au)
|| (amap->cur_cold.idx == amap->clusters_per_au)
|| (amap->cur_cold.au->free_clusters == 0)) {
AU_INFO_T *au = NULL;
@ -802,20 +791,21 @@ retry:
if (old_au) {
ASSERT(!IS_AU_WORKING(old_au, amap));
// must be NOT WORKING AU. (only for information gathering)
/* must be NOT WORKING AU.
* (only for information gathering)
*/
}
/* Next target AU is needed:
There are 3 possible ALLOC options for cold AU
ALLOC_COLD_ALGINED: Clean AU first, but heuristic packing is ON
ALLOC_COLD_PACKING: Packing AU first (usually for defrag)
ALLOC_COLD_SEQ : Sequential AU allocation (VFAT-like)
*/
* There are 3 possible ALLOC options for cold AU
*
* ALLOC_COLD_ALIGNED: Clean AU first, but heuristic packing is ON
* ALLOC_COLD_PACKING: Packing AU first (usually for defrag)
* ALLOC_COLD_SEQ : Sequential AU allocation (VFAT-like)
*/
/* Experimental: Modify allocation destination if needed (ALIGNED => SEQ) */
// dest = amap_update_dest(amap, dest);
// dest = amap_update_dest(amap, dest);
if ((dest == ALLOC_COLD_SEQ) && old_au) {
int i_au = old_au->idx + 1;
@ -823,10 +813,9 @@ retry:
while (i_au != old_au->idx) {
au = GET_AU(amap, i_au);
if ((au->free_clusters > 0) &&
!IS_AU_HOT(au, amap) &&
if ((au->free_clusters > 0) &&
!IS_AU_HOT(au, amap) &&
!IS_AU_IGNORED(au, amap)) {
MMSG("AMAP: new cold AU(%d) with %d "
"clusters (seq)\n",
au->idx, au->free_clusters);
@ -845,9 +834,9 @@ retry:
}
/*
* Check if packing is needed
* (ALLOC_COLD_PACKING is treated by this function)
/*
* Check if packing is needed
* (ALLOC_COLD_PACKING is treated by this function)
*/
au = amap_get_packing_au(amap, dest, num_to_wb, &n_clu_to_skip);
if (au) {
@ -855,14 +844,13 @@ retry:
"(packing)\n", au->idx, au->free_clusters);
goto ret_new_cold;
}
/* ALLOC_COLD_ALIGNED */
/* Check if the adjacent AU is clean */
if (old_au && ((old_au->idx + 1) < amap->n_au)) {
au = GET_AU(amap, old_au->idx + 1);
if ((au->free_clusters == amap->clusters_per_au) &&
!IS_AU_HOT(au, amap) &&
if ((au->free_clusters == amap->clusters_per_au) &&
!IS_AU_HOT(au, amap) &&
!IS_AU_IGNORED(au, amap)) {
MMSG("AMAP: new cold AU(%d) with %d clusters "
"(adjacent)\n", au->idx, au->free_clusters);
@ -879,7 +867,7 @@ retry:
goto retry;
}
MMSG("AMAP: New cold AU (%d) with %d clusters\n", \
MMSG("AMAP: New cold AU (%d) with %d clusters\n",
au->idx, au->free_clusters);
ret_new_cold:
@ -897,14 +885,14 @@ ret_new_cold:
void amap_put_target_au(AMAP_T *amap, TARGET_AU_T *cur, int num_allocated)
{
/* Update AMAP info vars. */
if (num_allocated > 0 && \
(cur->au->free_clusters + num_allocated) == amap->clusters_per_au)
// if the target AU was a clean AU before this allocation ...
if (num_allocated > 0 &&
(cur->au->free_clusters + num_allocated) == amap->clusters_per_au) {
/* if the target AU was a clean AU before this allocation ... */
amap->n_clean_au--;
if (num_allocated > 0 && \
}
if (num_allocated > 0 &&
cur->au->free_clusters == 0)
amap->n_full_au++;
if (IS_AU_HOT(cur->au, amap)) {
/* Hot AU */
@ -936,11 +924,9 @@ void amap_put_target_au(AMAP_T *amap, TARGET_AU_T *cur, int num_allocated)
}
/* Reposition target->idx for packing
(Heuristics)
Skip (num_to_skip) free clusters in (cur->au)
*/
/* Reposition target->idx for packing (Heuristics):
* Skip (num_to_skip) free clusters in (cur->au)
*/
static inline int amap_skip_cluster(struct super_block *sb, TARGET_AU_T *cur, int num_to_skip)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
@ -953,12 +939,11 @@ static inline int amap_skip_cluster(struct super_block *sb, TARGET_AU_T *cur, in
}
clu = CLU_of_i_AU(amap, cur->au->idx, cur->idx);
while (num_to_skip > 0) {
if (clu >= CLUS_BASE) {
/* Cf.
* If AMAP's integrity is okay,
* we don't need to check if (clu < fsi->num_clusters)
* we don't need to check if (clu < fsi->num_clusters)
*/
if (fat_ent_get(sb, clu, &read_clu))
@ -980,7 +965,7 @@ static inline int amap_skip_cluster(struct super_block *sb, TARGET_AU_T *cur, in
}
}
MMSG("AMAP: Skip_clusters (%d skipped => %d, among %d free clus)\n",\
MMSG("AMAP: Skip_clusters (%d skipped => %d, among %d free clus)\n",
num_to_skip_orig, cur->idx, cur->au->free_clusters);
return 0;
@ -1005,11 +990,12 @@ s32 amap_fat_alloc_cluster(struct super_block *sb, s32 num_alloc, CHAIN_T *p_cha
if ((fsi->used_clusters + num_alloc) > (fsi->num_clusters - CLUS_BASE)) {
/* Reserved count management error
or called by dir. management function on fully filled disk */
* or called by dir. management function on fully filled disk
*/
num_alloc = fsi->num_clusters - fsi->used_clusters - CLUS_BASE;
if (unlikely(num_alloc < 0)) {
sdfat_fs_error_ratelimit(sb,
sdfat_fs_error_ratelimit(sb,
"AMAP(%s): invalid used clusters(t:%u,u:%u)\n",
__func__, fsi->num_clusters, fsi->used_clusters);
return -EIO;
@ -1040,13 +1026,11 @@ retry_alloc:
}
target_au = cur->au;
/*
/*
* cur->au : target AU info pointer
* cur->idx : the intra-cluster idx in the AU to start from
* cur->idx : the intra-cluster idx in the AU to start from
*/
BUG_ON(!cur->au);
BUG_ON(!cur->au->free_clusters);
BUG_ON(cur->idx >= amap->clusters_per_au);
@ -1075,7 +1059,6 @@ retry_alloc:
else
if (fat_ent_set(sb, last_clu, new_clu))
return -EIO;
last_clu = new_clu;
/* Update au info */
@ -1090,8 +1073,7 @@ retry_alloc:
/* End of the AU */
if ((cur->idx >= amap->clusters_per_au) || !(target_au->free_clusters))
break;
} while(num_allocated_each < num_alloc);
} while (num_allocated_each < num_alloc);
/* Update strategy info */
amap_put_target_au(amap, cur, num_allocated_each);
@ -1118,9 +1100,9 @@ s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse)
/*
This is called by fat_free_cluster()
to update AMAP info.
*/
* This is called by fat_free_cluster()
* to update AMAP info.
*/
s32 amap_release_cluster(struct super_block *sb, u32 clu)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
@ -1133,7 +1115,12 @@ s32 amap_release_cluster(struct super_block *sb, u32 clu)
i_au = i_AU_of_CLU(amap, clu);
BUG_ON(i_au >= amap->n_au);
au = GET_AU(amap, i_au);
BUG_ON(au->free_clusters >= amap->clusters_per_au);
if (au->free_clusters >= amap->clusters_per_au) {
sdfat_fs_error(sb, "%s, au->free_clusters(%hd) is "
"greater than or equal to amap->clusters_per_au(%hd)"
, __func__, au->free_clusters, amap->clusters_per_au);
return -EIO;
}
if (IS_AU_HOT(au, amap)) {
MMSG("AMAP: Hot cluster freed\n");
@ -1141,7 +1128,7 @@ s32 amap_release_cluster(struct super_block *sb, u32 clu)
amap->total_fclu_hot++;
} else if (!IS_AU_WORKING(au, amap) && !IS_AU_IGNORED(au, amap)) {
/* Ordinary AU - update AU tree */
// Can be optimized by implmenting amap_update_au
// Can be optimized by implementing amap_update_au
amap_remove_cold_au(amap, au);
au->free_clusters++;
amap_add_cold_au(amap, au);
@ -1161,57 +1148,51 @@ s32 amap_release_cluster(struct super_block *sb, u32 clu)
/*
Check if the cluster is in a working AU
The caller should hold sb lock.
This func. should be used only if smart allocation is on
*/
* Check if the cluster is in a working AU
* The caller should hold sb lock.
* This func. should be used only if smart allocation is on
*/
s32 amap_check_working(struct super_block *sb, u32 clu)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
AU_INFO_T *au;
BUG_ON(!amap);
au = GET_AU(amap, i_AU_of_CLU(amap, clu));
return (IS_AU_WORKING(au, amap));
return IS_AU_WORKING(au, amap);
}
/*
Return the # of free clusters in that AU
*/
* Return the # of free clusters in that AU
*/
s32 amap_get_freeclus(struct super_block *sb, u32 clu)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
AU_INFO_T *au;
BUG_ON(!amap);
au = GET_AU(amap, i_AU_of_CLU(amap, clu));
return ((s32)au->free_clusters);
return (s32)au->free_clusters;
}
/*
Add the AU containing 'clu' to the ignored AU list.
The AU will not be used by the allocator.
XXX: Ignored counter needed
*/
* Add the AU containing 'clu' to the ignored AU list.
* The AU will not be used by the allocator.
*
* XXX: Ignored counter needed
*/
s32 amap_mark_ignore(struct super_block *sb, u32 clu)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
AU_INFO_T *au;
BUG_ON(!amap);
au = GET_AU(amap, i_AU_of_CLU(amap, clu));
if (IS_AU_HOT(au, amap)) {
// Doesn't work with hot AUs
/* Doesn't work with hot AUs */
return -EPERM;
} else if (IS_AU_WORKING(au, amap)) {
return -EBUSY;
@ -1227,17 +1208,15 @@ s32 amap_mark_ignore(struct super_block *sb, u32 clu)
BUG_ON(!IS_AU_IGNORED(au, amap));
//INC_IGN_CNT(au);
MMSG("AMAP: Mark ignored AU (%d)\n", au->idx);
return 0;
}
/*
This function could be used only on IGNORED AUs.
The caller should care whether it's ignored or not before using this func.
*/
* This function could be used only on IGNORED AUs.
* The caller should care whether it's ignored or not before using this func.
*/
s32 amap_unmark_ignore(struct super_block *sb, u32 clu)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
@ -1263,9 +1242,9 @@ s32 amap_unmark_ignore(struct super_block *sb, u32 clu)
}
/*
Unmark all ignored AU
This will return # of unmarked AUs
*/
* Unmark all ignored AU
* This will return # of unmarked AUs
*/
s32 amap_unmark_ignore_all(struct super_block *sb)
{
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
@ -1274,7 +1253,6 @@ s32 amap_unmark_ignore_all(struct super_block *sb)
int n = 0;
BUG_ON(!amap);
entry = amap->slist_ignored.next;
while (entry) {
au = list_entry(entry, AU_INFO_T, shead);
@ -1283,13 +1261,12 @@ s32 amap_unmark_ignore_all(struct super_block *sb)
BUG_ON(!IS_AU_IGNORED(au, amap));
//CLEAR_IGN_CNT(au);
amap_remove_from_list(au, &amap->slist_ignored);
amap_add_cold_au(amap, au);
MMSG("AMAP: Unmark ignored AU (%d)\n", au->idx);
n++;
entry = amap->slist_ignored.next;
}

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_AMAP_H
@ -25,16 +23,19 @@
#include <linux/rbtree.h>
/* AMAP Configuration Variable */
#define SMART_ALLOC_N_HOT_AU 5
#define SMART_ALLOC_N_HOT_AU (5)
/* Allocating Destination (for smart allocator) */
#define ALLOC_COLD_ALIGNED 1
#define ALLOC_COLD_PACKING 2
#define ALLOC_COLD_SEQ 4
/* Allocating Destination (for smart allocator):
* moved to sdfat.h
*/
/*
* #define ALLOC_COLD_ALIGNED (1)
* #define ALLOC_COLD_PACKING (2)
* #define ALLOC_COLD_SEQ (4)
*/
/* Minimum sectors for support AMAP create */
#define AMAP_MIN_SUPPORT_SECTORS 1048576
#define AMAP_MIN_SUPPORT_SECTORS (1048576)
#define amap_add_hot_au(amap, au) amap_insert_to_list(au, &amap->slist_hot)
@ -45,20 +46,20 @@ struct slist_head {
};
/* AU entry type */
typedef struct __AU_INFO_T{
uint16_t idx; /* the index of the AU (0, 1, 2, ... ) */
uint16_t free_clusters; /* # of available cluster */
typedef struct __AU_INFO_T {
uint16_t idx; /* the index of the AU (0, 1, 2, ... ) */
uint16_t free_clusters; /* # of available cluster */
union {
struct list_head head;
struct slist_head shead; /* singly linked list head for hot list */
struct slist_head shead;/* singly linked list head for hot list */
};
} AU_INFO_T;
/* Allocation Target AU */
typedef struct __TARGET_AU_T{
AU_INFO_T *au; /* Working AU */
uint16_t idx; /* Intra-AU cluster index */
typedef struct __TARGET_AU_T {
AU_INFO_T *au; /* Working AU */
uint16_t idx; /* Intra-AU cluster index */
uint16_t clu_to_skip; /* Clusters to skip */
} TARGET_AU_T;
@ -71,29 +72,29 @@ typedef struct {
/* AMAP options */
typedef struct {
unsigned int packing_ratio; /* Tunable packing ratio */
unsigned int au_size; /* AU size in sectors */
unsigned int packing_ratio; /* Tunable packing ratio */
unsigned int au_size; /* AU size in sectors */
unsigned int au_align_factor; /* Hidden sectors % au_size */
} AMAP_OPT_T;
typedef struct __AMAP_T{
spinlock_t amap_lock; // obsolete
typedef struct __AMAP_T {
spinlock_t amap_lock; /* obsolete */
struct super_block *sb;
int n_au;
int n_clean_au, n_full_au;
int clu_align_bias;
uint16_t clusters_per_au;
AU_INFO_T **au_table; /* An array of AU_INFO entries */
AU_INFO_T **au_table; /* An array of AU_INFO entries */
AMAP_OPT_T option;
/* Size-based AU management pool (cold) */
FCLU_NODE_T *fclu_nodes; /* An array of listheads */
int fclu_order; /* Page order that fclu_nodes needs */
int fclu_hint; /* maximum # of free clusters in an AU */
FCLU_NODE_T *fclu_nodes; /* An array of listheads */
int fclu_order; /* Page order that fclu_nodes needs */
int fclu_hint; /* maximum # of free clusters in an AU */
/* Hot AU list */
int total_fclu_hot; /* Free clusters in hot list */
int total_fclu_hot; /* Free clusters in hot list */
struct slist_head slist_hot; /* Hot AU list */
/* Ignored AU list */
@ -113,23 +114,24 @@ typedef struct __AMAP_T{
#define MAX_CLU_PER_AU (1024)
/* Cold AU bucket <-> # of freeclusters */
#define NODE_CLEAN(amap) &amap->fclu_nodes[amap->clusters_per_au - 1]
#define NODE(fclu, amap) &amap->fclu_nodes[fclu - 1]
#define NODE_CLEAN(amap) (&amap->fclu_nodes[amap->clusters_per_au - 1])
#define NODE(fclu, amap) (&amap->fclu_nodes[fclu - 1])
#define FREE_CLUSTERS(node, amap) ((int)(node - amap->fclu_nodes) + 1)
/* AU status */
#define MAGIC_WORKING (struct slist_head*)0xFFFF5091
#define IS_AU_HOT(au, amap) (au->shead.head == &amap->slist_hot)
#define IS_AU_IGNORED(au, amap) (au->shead.head == &amap->slist_ignored)
#define IS_AU_WORKING(au, amap) (au->shead.head == MAGIC_WORKING)
#define SET_AU_WORKING(au) (au->shead.head = MAGIC_WORKING)
#define MAGIC_WORKING ((struct slist_head *)0xFFFF5091)
#define IS_AU_HOT(au, amap) (au->shead.head == &amap->slist_hot)
#define IS_AU_IGNORED(au, amap) (au->shead.head == &amap->slist_ignored)
#define IS_AU_WORKING(au, amap) (au->shead.head == MAGIC_WORKING)
#define SET_AU_WORKING(au) (au->shead.head = MAGIC_WORKING)
/* AU <-> cluster */
#define i_AU_of_CLU(amap, clu) ((amap->clu_align_bias + clu) / amap->clusters_per_au)
#define CLU_of_i_AU(amap, i_au, idx) ((uint32_t)(i_au) * (uint32_t)amap->clusters_per_au + (idx) - amap->clu_align_bias)
#define i_AU_of_CLU(amap, clu) ((amap->clu_align_bias + clu) / amap->clusters_per_au)
#define CLU_of_i_AU(amap, i_au, idx) \
((uint32_t)(i_au) * (uint32_t)amap->clusters_per_au + (idx) - amap->clu_align_bias)
/*
* NOTE : AMAP internal functions are moved to core.h
/*
* NOTE : AMAP internal functions are moved to core.h
*/
#endif /* _SDFAT_AMAP_H */

44
api.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -132,10 +130,11 @@ s32 fsapi_statfs(struct super_block *sb, VOL_INFO_T *info)
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
/* check the validity of pointer parameters */
ASSERT(info);
ASSERT(info);
if (fsi->used_clusters == (u32) ~0) {
s32 err;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
err = fscore_statfs(sb, info);
mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
@ -156,6 +155,7 @@ EXPORT_SYMBOL(fsapi_statfs);
s32 fsapi_sync_fs(struct super_block *sb, s32 do_sync)
{
s32 err;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
err = fscore_sync_fs(sb, do_sync);
mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
@ -166,6 +166,7 @@ EXPORT_SYMBOL(fsapi_sync_fs);
s32 fsapi_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_sync)
{
s32 err;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
err = fscore_set_vol_flags(sb, new_flag, always_sync);
mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
@ -263,7 +264,8 @@ s32 fsapi_truncate(struct inode *inode, u64 old_size, u64 new_size)
EXPORT_SYMBOL(fsapi_truncate);
/* rename or move a old file into a new file */
s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry)
s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
struct inode *new_parent_inode, struct dentry *new_dentry)
{
s32 err;
struct super_block *sb = old_parent_inode->i_sb;
@ -299,7 +301,7 @@ s32 fsapi_read_inode(struct inode *inode, DIR_ENTRY_T *info)
{
s32 err;
struct super_block *sb = inode->i_sb;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
TMSG("%s entered (inode %p info %p\n", __func__, inode, info);
err = fscore_read_inode(inode, info);
@ -414,13 +416,14 @@ s32 fsapi_rmdir(struct inode *inode, FILE_ID_T *fid)
}
EXPORT_SYMBOL(fsapi_rmdir);
/* unlink a file.
* that is, remove an entry from a directory. BUT don't truncate */
/* unlink a file.
* that is, remove an entry from a directory. BUT don't truncate
*/
s32 fsapi_unlink(struct inode *inode, FILE_ID_T *fid)
{
s32 err;
struct super_block *sb = inode->i_sb;
/* check the validity of pointer parameters */
ASSERT(fid);
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
@ -441,7 +444,6 @@ s32 fsapi_cache_flush(struct super_block *sb, int do_sync)
}
EXPORT_SYMBOL(fsapi_cache_flush);
/* release FAT & buf cache */
s32 fsapi_cache_release(struct super_block *sb)
{
@ -457,7 +459,6 @@ s32 fsapi_cache_release(struct super_block *sb)
}
EXPORT_SYMBOL(fsapi_cache_release);
u32 fsapi_get_au_stat(struct super_block *sb, s32 mode)
{
/* volume lock is not required */
@ -490,7 +491,6 @@ s32 fsapi_dfr_get_info(struct super_block *sb, void *arg)
}
EXPORT_SYMBOL(fsapi_dfr_get_info);
s32 fsapi_dfr_scan_dir(struct super_block *sb, void *args)
{
s32 err;
@ -505,23 +505,23 @@ s32 fsapi_dfr_scan_dir(struct super_block *sb, void *args)
}
EXPORT_SYMBOL(fsapi_dfr_scan_dir);
s32 fsapi_dfr_validate_clus(struct inode *inode, void *chunk, int skip_prev)
{
s32 err;
struct super_block *sb = inode->i_sb;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
err = defrag_validate_cluster(inode,
(struct defrag_chunk_info *)chunk, skip_prev);
err = defrag_validate_cluster(inode,
(struct defrag_chunk_info *)chunk, skip_prev);
mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
return(err);
return err;
}
EXPORT_SYMBOL(fsapi_dfr_validate_clus);
s32 fsapi_dfr_reserve_clus(struct super_block *sb, s32 nr_clus)
{
s32 err;
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
err = defrag_reserve_clusters(sb, nr_clus);
mutex_unlock(&(SDFAT_SB(sb)->s_vlock));
@ -529,7 +529,6 @@ s32 fsapi_dfr_reserve_clus(struct super_block *sb, s32 nr_clus)
}
EXPORT_SYMBOL(fsapi_dfr_reserve_clus);
s32 fsapi_dfr_mark_ignore(struct super_block *sb, unsigned int clus)
{
/* volume lock is not required */
@ -537,7 +536,6 @@ s32 fsapi_dfr_mark_ignore(struct super_block *sb, unsigned int clus)
}
EXPORT_SYMBOL(fsapi_dfr_mark_ignore);
void fsapi_dfr_unmark_ignore_all(struct super_block *sb)
{
/* volume lock is not required */
@ -545,7 +543,6 @@ void fsapi_dfr_unmark_ignore_all(struct super_block *sb)
}
EXPORT_SYMBOL(fsapi_dfr_unmark_ignore_all);
s32 fsapi_dfr_map_clus(struct inode *inode, u32 clu_offset, u32 *clu)
{
s32 err;
@ -562,7 +559,6 @@ s32 fsapi_dfr_map_clus(struct inode *inode, u32 clu_offset, u32 *clu)
}
EXPORT_SYMBOL(fsapi_dfr_map_clus);
void fsapi_dfr_writepage_endio(struct page *page)
{
/* volume lock is not required */
@ -570,7 +566,6 @@ void fsapi_dfr_writepage_endio(struct page *page)
}
EXPORT_SYMBOL(fsapi_dfr_writepage_endio);
void fsapi_dfr_update_fat_prev(struct super_block *sb, int force)
{
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
@ -579,7 +574,6 @@ void fsapi_dfr_update_fat_prev(struct super_block *sb, int force)
}
EXPORT_SYMBOL(fsapi_dfr_update_fat_prev);
void fsapi_dfr_update_fat_next(struct super_block *sb)
{
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
@ -588,7 +582,6 @@ void fsapi_dfr_update_fat_next(struct super_block *sb)
}
EXPORT_SYMBOL(fsapi_dfr_update_fat_next);
void fsapi_dfr_check_discard(struct super_block *sb)
{
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
@ -597,7 +590,6 @@ void fsapi_dfr_check_discard(struct super_block *sb)
}
EXPORT_SYMBOL(fsapi_dfr_check_discard);
void fsapi_dfr_free_clus(struct super_block *sb, u32 clus)
{
mutex_lock(&(SDFAT_SB(sb)->s_vlock));
@ -606,7 +598,6 @@ void fsapi_dfr_free_clus(struct super_block *sb, u32 clus)
}
EXPORT_SYMBOL(fsapi_dfr_free_clus);
s32 fsapi_dfr_check_dfr_required(struct super_block *sb, int *totalau, int *cleanau, int *fullau)
{
/* volume lock is not required */
@ -614,7 +605,6 @@ s32 fsapi_dfr_check_dfr_required(struct super_block *sb, int *totalau, int *clea
}
EXPORT_SYMBOL(fsapi_dfr_check_dfr_required);
s32 fsapi_dfr_check_dfr_on(struct inode *inode, loff_t start, loff_t end, s32 cancel, const char *caller)
{
/* volume lock is not required */

65
api.h
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_API_H
@ -83,13 +81,13 @@ extern "C" {
/* NLS Type Definitions */
/*----------------------------------------------------------------------*/
/* DOS name stucture */
/* DOS name structure */
typedef struct {
u8 name[DOS_NAME_LENGTH];
u8 name_case;
} DOS_NAME_T;
/* unicode name stucture */
/* unicode name structure */
typedef struct {
u16 name[MAX_NAME_LENGTH+3]; /* +3 for null and for converting */
u16 name_hash;
@ -187,8 +185,8 @@ typedef struct {
} FILE_ID_T;
typedef struct {
s8* lfn;
s8* sfn;
s8 *lfn;
s8 *sfn;
s32 lfnbuf_len; //usally MAX_UNINAME_BUF_SIZE
s32 sfnbuf_len; //usally MAX_DOSNAME_BUF_SIZE, used only for vfat, not for exfat
} DENTRY_NAMEBUF_T;
@ -225,32 +223,32 @@ typedef struct __FATENT_OPS_T {
} FATENT_OPS_T;
typedef struct {
s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc, CHAIN_T *p_chain, int dest);
s32 (*free_cluster)(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse);
s32 (*count_used_clusters)(struct super_block *sb, u32* ret_count);
s32 (*init_dir_entry)(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u32 type,u32 start_clu, u64 size);
s32 (*init_ext_entry)(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 num_entries, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname);
s32 (*find_dir_entry)(struct super_block *sb, FILE_ID_T *fid, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *p_dosname, u32 type);
s32 (*delete_dir_entry)(struct super_block *sb, CHAIN_T *p_dir, s32 entry, s32 offset, s32 num_entries);
void (*get_uniname_from_ext_entry)(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u16 *uniname);
s32 (*count_ext_entries)(struct super_block *sb, CHAIN_T *p_dir, s32 entry, DENTRY_T *p_entry);
s32 (*calc_num_entries)(UNI_NAME_T *p_uniname);
u32 (*get_entry_type)(DENTRY_T *p_entry);
void (*set_entry_type)(DENTRY_T *p_entry, u32 type);
u32 (*get_entry_attr)(DENTRY_T *p_entry);
void (*set_entry_attr)(DENTRY_T *p_entry, u32 attr);
u8 (*get_entry_flag)(DENTRY_T *p_entry);
void (*set_entry_flag)(DENTRY_T *p_entry, u8 flag);
u32 (*get_entry_clu0)(DENTRY_T *p_entry);
void (*set_entry_clu0)(DENTRY_T *p_entry, u32 clu0);
u64 (*get_entry_size)(DENTRY_T *p_entry);
void (*set_entry_size)(DENTRY_T *p_entry, u64 size);
void (*get_entry_time)(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode);
void (*set_entry_time)(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode);
u32 (*get_au_stat)(struct super_block *sb, s32 mode);
s32 (*alloc_cluster)(struct super_block *, s32, CHAIN_T *, int);
s32 (*free_cluster)(struct super_block *, CHAIN_T *, s32);
s32 (*count_used_clusters)(struct super_block *, u32 *);
s32 (*init_dir_entry)(struct super_block *, CHAIN_T *, s32, u32, u32, u64);
s32 (*init_ext_entry)(struct super_block *, CHAIN_T *, s32, s32, UNI_NAME_T *, DOS_NAME_T *);
s32 (*find_dir_entry)(struct super_block *, FILE_ID_T *, CHAIN_T *, UNI_NAME_T *, s32, DOS_NAME_T *, u32);
s32 (*delete_dir_entry)(struct super_block *, CHAIN_T *, s32, s32, s32);
void (*get_uniname_from_ext_entry)(struct super_block *, CHAIN_T *, s32, u16 *);
s32 (*count_ext_entries)(struct super_block *, CHAIN_T *, s32, DENTRY_T *);
s32 (*calc_num_entries)(UNI_NAME_T *);
s32 (*check_max_dentries)(FILE_ID_T *);
u32 (*get_entry_type)(DENTRY_T *);
void (*set_entry_type)(DENTRY_T *, u32);
u32 (*get_entry_attr)(DENTRY_T *);
void (*set_entry_attr)(DENTRY_T *, u32);
u8 (*get_entry_flag)(DENTRY_T *);
void (*set_entry_flag)(DENTRY_T *, u8);
u32 (*get_entry_clu0)(DENTRY_T *);
void (*set_entry_clu0)(DENTRY_T *, u32);
u64 (*get_entry_size)(DENTRY_T *);
void (*set_entry_size)(DENTRY_T *, u64);
void (*get_entry_time)(DENTRY_T *, TIMESTAMP_T *, u8);
void (*set_entry_time)(DENTRY_T *, TIMESTAMP_T *, u8);
u32 (*get_au_stat)(struct super_block *, s32);
} FS_FUNC_T;
typedef struct __FS_INFO_T {
s32 bd_opened; // opened or not
u32 vol_type; // volume FAT type
@ -270,7 +268,7 @@ typedef struct __FS_INFO_T {
u32 dentries_in_root; // num of dentries in root dir
u32 dentries_per_clu; // num of dentries per cluster
u32 vol_flag; // volume dirty flag
struct buffer_head *pbr_bh; // buffer_head of PBR sector
struct buffer_head *pbr_bh; // buffer_head of PBR sector
u32 map_clu; // allocation bitmap start cluster
u32 map_sectors; // num of allocation bitmap sectors
@ -334,7 +332,8 @@ s32 fsapi_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count
s32 fsapi_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount);
s32 fsapi_remove(struct inode *inode, FILE_ID_T *fid); /* unlink and truncate */
s32 fsapi_truncate(struct inode *inode, u64 old_size, u64 new_size);
s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry);
s32 fsapi_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
struct inode *new_parent_inode, struct dentry *new_dentry);
s32 fsapi_unlink(struct inode *inode, FILE_ID_T *fid);
s32 fsapi_read_inode(struct inode *inode, DIR_ENTRY_T *info);
s32 fsapi_write_inode(struct inode *inode, DIR_ENTRY_T *info, int sync);

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -50,14 +48,14 @@
/* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY */
/************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
/* EMPTY */
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) */
static struct backing_dev_info *inode_to_bdi(struct inode *bd_inode)
{
return bd_inode->i_mapping->backing_dev_info;
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) */
#endif
/*======================================================================*/
/* Function Definitions */
@ -66,7 +64,7 @@ s32 bdev_open_dev(struct super_block *sb)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
if (fsi->bd_opened)
if (fsi->bd_opened)
return 0;
fsi->bd_opened = true;
@ -86,12 +84,13 @@ static inline s32 block_device_ejected(struct super_block *sb)
struct inode *bd_inode = sb->s_bdev->bd_inode;
struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
return bdi->dev == NULL;
return (bdi->dev == NULL);
}
s32 bdev_check_bdi_valid(struct super_block *sb)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
if (block_device_ejected(sb)) {
if (!(fsi->prev_eio & SDFAT_EIO_BDI)) {
fsi->prev_eio |= SDFAT_EIO_BDI;
@ -101,6 +100,7 @@ s32 bdev_check_bdi_valid(struct super_block *sb)
}
return -ENXIO;
}
return 0;
}
@ -113,7 +113,7 @@ s32 bdev_readahead(struct super_block *sb, u32 secno, u32 num_secs)
struct blk_plug plug;
u32 i;
if (!fsi->bd_opened)
if (!fsi->bd_opened)
return -EIO;
blk_start_plug(&plug);
@ -135,11 +135,11 @@ s32 bdev_mread(struct super_block *sb, u32 secno, struct buffer_head **bh, u32 n
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
#endif /* CONFIG_SDFAT_DBG_IOCTL */
if (!fsi->bd_opened)
if (!fsi->bd_opened)
return -EIO;
brelse(*bh);
@ -153,17 +153,16 @@ s32 bdev_mread(struct super_block *sb, u32 secno, struct buffer_head **bh, u32 n
if (*bh)
return 0;
/*
/*
* patch 1.2.4 : reset ONCE warning message per volume.
*/
if(!(fsi->prev_eio & SDFAT_EIO_READ)) {
if (!(fsi->prev_eio & SDFAT_EIO_READ)) {
fsi->prev_eio |= SDFAT_EIO_READ;
sdfat_log_msg(sb, KERN_ERR, "%s: No bh. I/O error.", __func__);
sdfat_debug_warn_on(1);
}
return -EIO;
}
s32 bdev_mwrite(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 num_secs, s32 sync)
@ -175,11 +174,11 @@ s32 bdev_mwrite(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 n
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
#endif /* CONFIG_SDFAT_DBG_IOCTL */
if (!fsi->bd_opened)
if (!fsi->bd_opened)
return -EIO;
if (secno == bh->b_blocknr) {
@ -206,14 +205,12 @@ s32 bdev_mwrite(struct super_block *sb, u32 secno, struct buffer_head *bh, u32 n
}
__brelse(bh2);
}
return 0;
no_bh:
/*
/*
* patch 1.2.4 : reset ONCE warning message per volume.
*/
if(!(fsi->prev_eio & SDFAT_EIO_WRITE)) {
if (!(fsi->prev_eio & SDFAT_EIO_WRITE)) {
fsi->prev_eio |= SDFAT_EIO_WRITE;
sdfat_log_msg(sb, KERN_ERR, "%s: No bh. I/O error.", __func__);
sdfat_debug_warn_on(1);
@ -229,11 +226,11 @@ s32 bdev_sync_all(struct super_block *sb)
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
if (flags & SDFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
#endif /* CONFIG_SDFAT_DBG_IOCTL */
if (!fsi->bd_opened)
if (!fsi->bd_opened)
return -EIO;
return sync_blockdev(sb->s_bdev);
@ -245,85 +242,83 @@ s32 bdev_sync_all(struct super_block *sb)
s32 read_sect(struct super_block *sb, u32 sec, struct buffer_head **bh, s32 read)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
BUG_ON(!bh);
if ( (sec >= fsi->num_sectors)
&& (fsi->num_sectors > 0) ) {
sdfat_fs_error_ratelimit(sb, "%s: out of range (sect:%u)",
__func__, sec);
BUG_ON(!bh);
if ((sec >= fsi->num_sectors) && (fsi->num_sectors > 0)) {
sdfat_fs_error_ratelimit(sb,
"%s: out of range (sect:%u)", __func__, sec);
return -EIO;
}
if (bdev_mread(sb, sec, bh, 1, read)) {
sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%u)",
__func__, sec);
sdfat_fs_error_ratelimit(sb,
"%s: I/O error (sect:%u)", __func__, sec);
return -EIO;
}
return 0;
} /* end of read_sect */
}
s32 write_sect(struct super_block *sb, u32 sec, struct buffer_head *bh, s32 sync)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
BUG_ON(!bh);
if ( (sec >= fsi->num_sectors)
&& (fsi->num_sectors > 0) ) {
sdfat_fs_error_ratelimit(sb, "%s: out of range (sect:%u)",
__func__, sec);
BUG_ON(!bh);
if ((sec >= fsi->num_sectors) && (fsi->num_sectors > 0)) {
sdfat_fs_error_ratelimit(sb,
"%s: out of range (sect:%u)", __func__, sec);
return -EIO;
}
if (bdev_mwrite(sb, sec, bh, 1, sync)) {
sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%u)",
__func__, sec);
__func__, sec);
return -EIO;
}
return 0;
} /* end of write_sect */
}
s32 read_msect(struct super_block *sb, u32 sec, struct buffer_head **bh, s32 num_secs, s32 read)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
BUG_ON(!bh);
if ( ((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0) ) {
BUG_ON(!bh);
if (((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0)) {
sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%u len:%d)",
__func__ ,sec, num_secs);
__func__, sec, num_secs);
return -EIO;
}
if (bdev_mread(sb, sec, bh, num_secs, read)) {
sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%u len:%d)",
__func__,sec, num_secs);
__func__, sec, num_secs);
return -EIO;
}
return 0;
} /* end of read_msect */
}
s32 write_msect(struct super_block *sb, u32 sec, struct buffer_head *bh, s32 num_secs, s32 sync)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
BUG_ON(!bh);
if ( ((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0) ) {
BUG_ON(!bh);
if (((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0)) {
sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%u len:%d)",
__func__ ,sec, num_secs);
__func__, sec, num_secs);
return -EIO;
}
if (bdev_mwrite(sb, sec, bh, num_secs, sync)) {
sdfat_fs_error_ratelimit(sb, "%s: I/O error (sect:%u len:%d)",
__func__,sec, num_secs);
__func__, sec, num_secs);
return -EIO;
}
return 0;
} /* end of write_msect */
}
static inline void __blkdev_write_bhs(struct buffer_head **bhs, s32 nr_bhs)
{
@ -347,8 +342,6 @@ static inline s32 __blkdev_sync_bhs(struct buffer_head **bhs, s32 nr_bhs)
static inline s32 __buffer_zeroed(struct super_block *sb, u32 blknr, s32 num_secs)
{
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
s32 nr_bhs = MAX_BUF_PER_PAGE;
u32 last_blknr = blknr + num_secs;
@ -407,14 +400,14 @@ s32 write_msect_zero(struct super_block *sb, u32 sec, s32 num_secs)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
if ( ((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0) ) {
if (((sec+num_secs) > fsi->num_sectors) && (fsi->num_sectors > 0)) {
sdfat_fs_error_ratelimit(sb, "%s: out of range(sect:%u len:%d)",
__func__ ,sec, num_secs);
__func__, sec, num_secs);
return -EIO;
}
/* Just return -EAGAIN if it is failed */
if ( __buffer_zeroed(sb, sec, num_secs))
if (__buffer_zeroed(sb, sec, num_secs))
return -EAGAIN;
return 0;

163
cache.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -48,9 +46,9 @@
/*----------------------------------------------------------------------*/
/* Local Variable Definitions */
/*----------------------------------------------------------------------*/
#define LOCKBIT 0x01
#define DIRTYBIT 0x02
#define KEEPBIT 0x04
#define LOCKBIT (0x01)
#define DIRTYBIT (0x02)
#define KEEPBIT (0x04)
/*----------------------------------------------------------------------*/
/* Cache handling function declarations */
@ -74,7 +72,7 @@ static void push_to_mru(cache_ent_t *bp, cache_ent_t *list)
bp->prev = list;
list->next->prev = bp;
list->next = bp;
} /* end of __dcache_push_to_mru */
}
static void push_to_lru(cache_ent_t *bp, cache_ent_t *list)
{
@ -82,31 +80,31 @@ static void push_to_lru(cache_ent_t *bp, cache_ent_t *list)
bp->next = list;
list->prev->next = bp;
list->prev = bp;
} /* end of __dcache_push_to_lru */
}
static void move_to_mru(cache_ent_t *bp, cache_ent_t *list)
{
bp->prev->next = bp->next;
bp->next->prev = bp->prev;
push_to_mru(bp, list);
} /* end of __dcache_move_to_mru */
}
static void move_to_lru(cache_ent_t *bp, cache_ent_t *list)
{
bp->prev->next = bp->next;
bp->next->prev = bp->prev;
push_to_lru(bp, list);
} /* end of __dcache_move_to_lru */
}
static inline s32 __check_hash_valid(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
return -EINVAL;
}
#endif
if ( (bp->hash.next == bp) || (bp->hash.prev == bp) )
if ((bp->hash.next == bp) || (bp->hash.prev == bp))
return -EINVAL;
return 0;
@ -119,15 +117,15 @@ static inline void __remove_from_hash(cache_ent_t *bp)
bp->hash.next = bp;
bp->hash.prev = bp;
#ifdef DEBUG_HASH_LIST
bp->hash.next = (cache_ent_t*)DEBUG_HASH_NEXT;
bp->hash.prev = (cache_ent_t*)DEBUG_HASH_PREV;
bp->hash.next = (cache_ent_t *)DEBUG_HASH_NEXT;
bp->hash.prev = (cache_ent_t *)DEBUG_HASH_PREV;
#endif
}
/* Do FAT mirroring (don't sync)
sec: sector No. in FAT1
bh: bh of sec.
*/
/* Do FAT mirroring (don't sync)
* sec: sector No. in FAT1
* bh: bh of sec.
*/
static inline s32 __fat_copy(struct super_block *sb, u32 sec, struct buffer_head *bh, int sync)
{
#ifdef CONFIG_SDFAT_FAT_MIRRORING
@ -150,7 +148,7 @@ static inline s32 __fat_copy(struct super_block *sb, u32 sec, struct buffer_head
/*
* returns 1, if bp is flushed
* returns 0, if bp is not dirty
* returns 0, if bp is not dirty
* returns -1, if error occurs
*/
static s32 __fcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
@ -176,11 +174,12 @@ static s32 __fcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
static s32 __fcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
__fcache_remove_hash(bp);
bp->sec = ~0;
bp->flag = 0;
if(bp->bh) {
if (bp->bh) {
__brelse(bp->bh);
bp->bh = NULL;
}
@ -202,11 +201,10 @@ u8 *fcache_getblk(struct super_block *sb, u32 sec)
return NULL;
}
move_to_mru(bp, &fsi->fcache.lru_list);
return(bp->bh->b_data);
return bp->bh->b_data;
}
bp = __fcache_get(sb, sec);
if (!__check_hash_valid(bp))
__fcache_remove_hash(bp);
@ -218,10 +216,10 @@ u8 *fcache_getblk(struct super_block *sb, u32 sec)
if ((sec & (page_ra_count - 1)) == 0)
bdev_readahead(sb, sec, page_ra_count);
/*
/*
* patch 1.2.4 : buffer_head null pointer exception problem.
*
* When read_sect is failed, fcache should be moved to
* When read_sect is failed, fcache should be moved to
* EMPTY hash_list and the first of lru_list.
*/
if (read_sect(sb, sec, &(bp->bh), 1)) {
@ -236,9 +234,10 @@ static inline int __mark_delayed_dirty(struct super_block *sb, cache_ent_t *bp)
{
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
if (fsi->vol_type == EXFAT)
return -ENOTSUPP;
bp->flag |= DIRTYBIT;
return 0;
#else
@ -253,8 +252,10 @@ s32 fcache_modify(struct super_block *sb, u32 sec)
cache_ent_t *bp;
bp = __fcache_find(sb, sec);
if (!bp)
if (!bp) {
sdfat_fs_error(sb, "Can`t find fcache (sec 0x%08x)", sec);
return -EIO;
}
if (!__mark_delayed_dirty(sb, bp))
return 0;
@ -346,15 +347,16 @@ s32 fcache_release_all(struct super_block *sb)
bp = fsi->fcache.lru_list.next;
while (bp != &fsi->fcache.lru_list) {
s32 ret_tmp = __fcache_ent_flush(sb, bp, 0);
if (ret_tmp < 0)
ret = ret_tmp;
else
dirtycnt += ret_tmp;
bp->sec = ~0;
bp->flag = 0;
if(bp->bh) {
if (bp->bh) {
__brelse(bp->bh);
bp->bh = NULL;
}
@ -383,7 +385,7 @@ s32 fcache_flush(struct super_block *sb, u32 sync)
dirtycnt += ret;
bp = bp->next;
}
MMSG("BD: flush / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
return ret;
}
@ -395,11 +397,9 @@ static cache_ent_t *__fcache_find(struct super_block *sb, u32 sec)
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
off = (sec + (sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE - 1);
hp = &(fsi->fcache.hash_list[off]);
for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
if (bp->sec == sec) {
/*
* patch 1.2.4 : for debugging
*/
@ -407,11 +407,11 @@ static cache_ent_t *__fcache_find(struct super_block *sb, u32 sec)
"It will make system panic.\n");
touch_buffer(bp->bh);
return(bp);
return bp;
}
}
return(NULL);
} /* end of __fcache_find */
return NULL;
}
static cache_ent_t *__fcache_get(struct super_block *sb, u32 sec)
{
@ -419,7 +419,6 @@ static cache_ent_t *__fcache_get(struct super_block *sb, u32 sec)
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
bp = fsi->fcache.lru_list.prev;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
while (bp->flag & DIRTYBIT) {
cache_ent_t *bp_prev = bp->prev;
@ -436,8 +435,8 @@ static cache_ent_t *__fcache_get(struct super_block *sb, u32 sec)
// sync_dirty_buffer(bp->bh);
move_to_mru(bp, &fsi->fcache.lru_list);
return(bp);
} /* end of __fcache_get */
return bp;
}
static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
{
@ -453,14 +452,14 @@ static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
bp->hash.prev = hp;
hp->hash.next->hash.prev = bp;
hp->hash.next = bp;
} /* end of __fcache_insert_hash */
}
static void __fcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
"(bp:%p)\n", __func__, bp);
return;
@ -468,7 +467,7 @@ static void __fcache_remove_hash(cache_ent_t *bp)
#endif
WARN_ON(bp->flag & DIRTYBIT);
__remove_from_hash(bp);
} /* end of __fcache_remove_hash */
}
/*======================================================================*/
/* Buffer Read/Write Functions */
@ -483,12 +482,12 @@ s32 dcache_readahead(struct super_block *sb, u32 sec)
u32 adj_ra_count = max(fsi->sect_per_clus, page_ra_count);
u32 ra_count = min(adj_ra_count, max_ra_count);
/* Read-ahead is not required */
/* Read-ahead is not required */
if (fsi->sect_per_clus == 1)
return 0;
if (sec < fsi->data_start_sector) {
EMSG("BD: %s: requested sector is invalid(sect:%u, root:%u)\n",
EMSG("BD: %s: requested sector is invalid(sect:%u, root:%u)\n",
__func__, sec, fsi->data_start_sector);
return -EIO;
}
@ -508,7 +507,7 @@ s32 dcache_readahead(struct super_block *sb, u32 sec)
/*
* returns 1, if bp is flushed
* returns 0, if bp is not dirty
* returns 0, if bp is not dirty
* returns -1, if error occurs
*/
static s32 __dcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
@ -533,15 +532,15 @@ static s32 __dcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
MMSG("%s : bp[%p] (sec:%08x flag:%08x bh:%p) list(prev:%p next:%p) "
"hash(prev:%p next:%p)\n", __func__,
"hash(prev:%p next:%p)\n", __func__,
bp, bp->sec, bp->flag, bp->bh, bp->prev, bp->next,
bp->hash.prev, bp->hash.next);
__dcache_remove_hash(bp);
bp->sec = ~0;
bp->flag = 0;
if(bp->bh) {
if (bp->bh) {
__brelse(bp->bh);
bp->bh = NULL;
}
@ -568,7 +567,7 @@ u8 *dcache_getblk(struct super_block *sb, u32 sec)
if (!(bp->flag & KEEPBIT)) // already in keep list
move_to_mru(bp, &fsi->dcache.lru_list);
return(bp->bh->b_data);
return bp->bh->b_data;
}
bp = __dcache_get(sb, sec);
@ -597,20 +596,21 @@ s32 dcache_modify(struct super_block *sb, u32 sec)
set_sb_dirty(sb);
bp = __dcache_find(sb, sec);
if (likely(bp)) {
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
if (fsi->vol_type != EXFAT) {
bp->flag |= DIRTYBIT;
return 0;
}
#endif
ret = write_sect(sb, sec, bp->bh, 0);
if (unlikely(!bp)) {
sdfat_fs_error(sb, "Can`t find dcache (sec 0x%08x)", sec);
return -EIO;
}
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
if (SDFAT_SB(sb)->fsi.vol_type != EXFAT) {
bp->flag |= DIRTYBIT;
return 0;
}
#endif
ret = write_sect(sb, sec, bp->bh, 0);
if (ret) {
DMSG("%s : failed to modify buffer(err:%d, sec:%u, bp:0x%p)\n",
__func__, ret, sec, bp);
__func__, ret, sec, bp);
}
return ret;
@ -621,7 +621,7 @@ s32 dcache_lock(struct super_block *sb, u32 sec)
cache_ent_t *bp;
bp = __dcache_find(sb, sec);
if (likely(bp)) {
if (likely(bp)) {
bp->flag |= LOCKBIT;
return 0;
}
@ -662,7 +662,7 @@ s32 dcache_release(struct super_block *sb, u32 sec)
bp->sec = ~0;
bp->flag = 0;
if(bp->bh) {
if (bp->bh) {
__brelse(bp->bh);
bp->bh = NULL;
}
@ -678,9 +678,10 @@ s32 dcache_release_all(struct super_block *sb)
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
s32 dirtycnt = 0;
/* Connect list elements */
/* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list){
/* Connect list elements:
* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last)
*/
while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
// bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
move_to_mru(bp_keep, &fsi->dcache.lru_list);
@ -695,11 +696,10 @@ s32 dcache_release_all(struct super_block *sb)
ret = -EIO;
}
#endif
bp->sec = ~0;
bp->flag = 0;
if(bp->bh) {
if (bp->bh) {
__brelse(bp->bh);
bp->bh = NULL;
}
@ -719,14 +719,14 @@ s32 dcache_flush(struct super_block *sb, u32 sync)
s32 dirtycnt = 0;
s32 keepcnt = 0;
/* Connect list elements */
/* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last) */
// XXX: optimization
while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list){
/* Connect list elements:
* LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last)
*/
while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
move_to_mru(bp_keep, &fsi->dcache.lru_list);
keepcnt++;
}
@ -750,7 +750,7 @@ s32 dcache_flush(struct super_block *sb, u32 sync)
bp = bp->next;
}
MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
dirtycnt, keepcnt, ret);
return ret;
}
@ -767,11 +767,11 @@ static cache_ent_t *__dcache_find(struct super_block *sb, u32 sec)
for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
if (bp->sec == sec) {
touch_buffer(bp->bh);
return(bp);
return bp;
}
}
return NULL;
} /* end of __dcache_find */
}
static cache_ent_t *__dcache_get(struct super_block *sb, u32 sec)
{
@ -788,7 +788,6 @@ static cache_ent_t *__dcache_get(struct super_block *sb, u32 sec)
bp->flag |= KEEPBIT;
move_to_mru(bp, &fsi->dcache.keep_list);
}
bp = bp_prev;
/* If all dcaches are dirty */
@ -799,15 +798,15 @@ static cache_ent_t *__dcache_get(struct super_block *sb, u32 sec)
}
}
#else
while (bp->flag & LOCKBIT)
while (bp->flag & LOCKBIT)
bp = bp->prev;
#endif
// if (bp->flag & DIRTYBIT)
// sync_dirty_buffer(bp->bh);
move_to_mru(bp, &fsi->dcache.lru_list);
return(bp);
} /* end of __dcache_get */
return bp;
}
static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
{
@ -823,13 +822,13 @@ static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
bp->hash.prev = hp;
hp->hash.next->hash.prev = bp;
hp->hash.next = bp;
} /* end of __dcache_insert_hash */
}
static void __dcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
if ( (bp->hash.next == (cache_ent_t*)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t*)DEBUG_HASH_PREV) ) {
if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
(bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
"(bp:%p)\n", __func__, bp);
return;
@ -837,7 +836,7 @@ static void __dcache_remove_hash(cache_ent_t *bp)
#endif
WARN_ON(bp->flag & DIRTYBIT);
__remove_from_hash(bp);
} /* end of __dcache_remove_hash */
}
/* end of cache.c */

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_CONFIG_H
@ -65,7 +63,7 @@
#ifndef CONFIG_SDFAT_FAT32_SHORTNAME_SEQ /* Shortname ~1, ... ~9 have higher
* priority (WIN32/VFAT-like)
*/
//#define CONFIG_SDFAT_FAT32_SHORTNAME_SEQ
//#define CONFIG_SDFAT_FAT32_SHORTNAME_SEQ
#endif
#ifndef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
@ -101,7 +99,7 @@
#endif
#ifndef CONFIG_SDFAT_VIRTUAL_XATTR
#define CONFIG_SDFAT_VIRTUAL_XATTR
//#define CONFIG_SDFAT_VIRTUAL_XATTR
#endif
#ifndef CONFIG_SDFAT_SUPPORT_STLOG
@ -114,7 +112,7 @@
#ifndef CONFIG_SDFAT_DBG_IOCTL
//#define CONFIG_SDFAT_DBG_IOCTL
#endif
#endif
#ifndef CONFIG_SDFAT_DBG_MSG
//#define CONFIG_SDFAT_DBG_MSG

448
core.c

File diff suppressed because it is too large Load Diff

22
core.h
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_CORE_H
@ -90,7 +88,8 @@ s32 fscore_create(struct inode *inode, u8 *path, u8 mode, FILE_ID_T *fid);
s32 fscore_read_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *rcount);
s32 fscore_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 count, u64 *wcount);
s32 fscore_truncate(struct inode *inode, u64 old_size, u64 new_size);
s32 fscore_rename(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry);
s32 fscore_rename(struct inode *old_parent_inode, FILE_ID_T *fid,
struct inode *new_parent_inode, struct dentry *new_dentry);
s32 fscore_remove(struct inode *inode, FILE_ID_T *fid);
s32 fscore_read_inode(struct inode *inode, DIR_ENTRY_T *info);
s32 fscore_write_inode(struct inode *inode, DIR_ENTRY_T *info, int sync);
@ -116,17 +115,17 @@ DENTRY_T *get_dentry_in_dir(struct super_block *sb, CHAIN_T *p_dir, s32 entry, u
void get_uniname_from_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, UNI_NAME_T *p_uniname, u8 mode);
/* file operation functions */
s32 walk_fat_chain (struct super_block *sb, CHAIN_T *p_dir, s32 byte_offset, u32 *clu);
s32 walk_fat_chain(struct super_block *sb, CHAIN_T *p_dir, s32 byte_offset, u32 *clu);
/* sdfat/cache.c */
s32 meta_cache_init(struct super_block *sb);
s32 meta_cache_shutdown(struct super_block *sb);
u8* fcache_getblk(struct super_block *sb, u32 sec);
u8 *fcache_getblk(struct super_block *sb, u32 sec);
s32 fcache_modify(struct super_block *sb, u32 sec);
s32 fcache_release_all(struct super_block *sb);
s32 fcache_flush(struct super_block *sb, u32 sync);
u8* dcache_getblk(struct super_block *sb, u32 sec);
u8 *dcache_getblk(struct super_block *sb, u32 sec);
s32 dcache_modify(struct super_block *sb, u32 sec);
s32 dcache_lock(struct super_block *sb, u32 sec);
s32 dcache_unlock(struct super_block *sb, u32 sec);
@ -151,10 +150,11 @@ s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr);
s32 load_alloc_bmp(struct super_block *sb);
void free_alloc_bmp(struct super_block *sb);
ENTRY_SET_CACHE_T *get_dentry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep);
void release_dentry_set (ENTRY_SET_CACHE_T *es);
ENTRY_SET_CACHE_T *get_dentry_set_in_dir(struct super_block *sb,
CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep);
void release_dentry_set(ENTRY_SET_CACHE_T *es);
s32 update_dir_chksum(struct super_block *sb, CHAIN_T *p_dir, s32 entry);
s32 update_dir_chksum_with_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es);
s32 update_dir_chksum_with_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es);
bool is_dir_empty(struct super_block *sb, CHAIN_T *p_dir);
s32 mount_exfat(struct super_block *sb, pbr_t *p_pbr);
@ -164,7 +164,7 @@ void amap_destroy(struct super_block *sb);
/* amap_smart.c : (de)allocation functions */
s32 amap_fat_alloc_cluster(struct super_block *sb, s32 num_alloc, CHAIN_T *p_chain, int dest);
s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse); /* Free a FAT chain (Not impelmented) */
s32 amap_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_relse);/* Not impelmented */
s32 amap_release_cluster(struct super_block *sb, u32 clu); /* Only update AMAP */
/* amap_smart.c : misc (for defrag) */

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -94,46 +92,44 @@ static u32 exfat_get_entry_type(DENTRY_T *p_entry)
{
FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
if (ep->type == EXFAT_UNUSED) {
if (ep->type == EXFAT_UNUSED)
return TYPE_UNUSED;
} else if (ep->type < 0x80) {
if (ep->type < 0x80)
return TYPE_DELETED;
} else if (ep->type == 0x80) {
if (ep->type == 0x80)
return TYPE_INVALID;
} else if (ep->type < 0xA0) {
if (ep->type == 0x81) {
if (ep->type < 0xA0) {
if (ep->type == 0x81)
return TYPE_BITMAP;
} else if (ep->type == 0x82) {
if (ep->type == 0x82)
return TYPE_UPCASE;
} else if (ep->type == 0x83) {
if (ep->type == 0x83)
return TYPE_VOLUME;
} else if (ep->type == 0x85) {
if (ep->type == 0x85) {
if (le16_to_cpu(ep->attr) & ATTR_SUBDIR)
return TYPE_DIR;
else
return TYPE_FILE;
return TYPE_FILE;
}
return TYPE_CRITICAL_PRI;
} else if (ep->type < 0xC0) {
if (ep->type == 0xA0) {
}
if (ep->type < 0xC0) {
if (ep->type == 0xA0)
return TYPE_GUID;
} else if (ep->type == 0xA1) {
if (ep->type == 0xA1)
return TYPE_PADDING;
} else if (ep->type == 0xA2) {
if (ep->type == 0xA2)
return TYPE_ACLTAB;
}
return TYPE_BENIGN_PRI;
} else if (ep->type < 0xE0) {
if (ep->type == 0xC0) {
}
if (ep->type < 0xE0) {
if (ep->type == 0xC0)
return TYPE_STREAM;
} else if (ep->type == 0xC1) {
if (ep->type == 0xC1)
return TYPE_EXTEND;
} else if (ep->type == 0xC2) {
if (ep->type == 0xC2)
return TYPE_ACL;
}
return TYPE_CRITICAL_SEC;
}
return TYPE_BENIGN_SEC;
} /* end of exfat_get_entry_type */
@ -169,49 +165,57 @@ static void exfat_set_entry_type(DENTRY_T *p_entry, u32 type)
static u32 exfat_get_entry_attr(DENTRY_T *p_entry)
{
FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
return((u32) le16_to_cpu(ep->attr));
FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
return (u32)le16_to_cpu(ep->attr);
} /* end of exfat_get_entry_attr */
static void exfat_set_entry_attr(DENTRY_T *p_entry, u32 attr)
{
FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
ep->attr = cpu_to_le16((u16) attr);
} /* end of exfat_set_entry_attr */
static u8 exfat_get_entry_flag(DENTRY_T *p_entry)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
return(ep->flags);
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
return ep->flags;
} /* end of exfat_get_entry_flag */
static void exfat_set_entry_flag(DENTRY_T *p_entry, u8 flags)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
ep->flags = flags;
} /* end of exfat_set_entry_flag */
static u32 exfat_get_entry_clu0(DENTRY_T *p_entry)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
return le32_to_cpu(ep->start_clu);
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
return (u32)le32_to_cpu(ep->start_clu);
} /* end of exfat_get_entry_clu0 */
static void exfat_set_entry_clu0(DENTRY_T *p_entry, u32 start_clu)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
ep->start_clu = cpu_to_le32(start_clu);
} /* end of exfat_set_entry_clu0 */
static u64 exfat_get_entry_size(DENTRY_T *p_entry)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
return le64_to_cpu(ep->valid_size);
} /* end of exfat_get_entry_size */
static void exfat_set_entry_size(DENTRY_T *p_entry, u64 size)
{
STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry;
STRM_DENTRY_T *ep = (STRM_DENTRY_T *)p_entry;
ep->valid_size = cpu_to_le64(size);
ep->size = cpu_to_le64(size);
} /* end of exfat_set_entry_size */
@ -219,7 +223,7 @@ static void exfat_set_entry_size(DENTRY_T *p_entry, u64 size)
static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
{
u16 t = 0x00, d = 0x21;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
switch (mode) {
case TM_CREATE:
@ -247,7 +251,7 @@ static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
static void exfat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode)
{
u16 t, d;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry;
FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry;
t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
d = (tp->year << 9) | (tp->mon << 5) | tp->day;
@ -432,7 +436,8 @@ static s32 exfat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, s32 en
return 0;
}
static s32 __write_partial_entries_in_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es, u32 sec, s32 off, u32 count)
static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
ENTRY_SET_CACHE_T *es, u32 sec, s32 off, u32 count)
{
s32 num_entries, buf_off = (off - es->offset);
u32 remaining_byte_in_sector, copy_entries;
@ -444,10 +449,10 @@ static s32 __write_partial_entries_in_entry_set (struct super_block *sb, ENTRY_S
MMSG("%s: es %p sec %u off %d cnt %d\n", __func__, es, sec, off, count);
num_entries = count;
while(num_entries) {
// white per sector base
while (num_entries) {
/* write per sector base */
remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off;
copy_entries = min((s32)(remaining_byte_in_sector>> DENTRY_SIZE_BITS) , num_entries);
copy_entries = min((s32)(remaining_byte_in_sector >> DENTRY_SIZE_BITS), num_entries);
buf = dcache_getblk(sb, sec);
if (!buf)
goto err_out;
@ -484,18 +489,18 @@ err_out:
/* write back all entries in entry set */
static s32 __write_whole_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es)
{
return __write_partial_entries_in_entry_set(sb, es, es->sector,es->offset, es->num_entries);
return __write_partial_entries_in_entry_set(sb, es, es->sector, es->offset, es->num_entries);
}
s32 update_dir_chksum_with_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es)
s32 update_dir_chksum_with_entry_set(struct super_block *sb, ENTRY_SET_CACHE_T *es)
{
DENTRY_T *ep;
u16 chksum = 0;
s32 chksum_type = CS_DIR_ENTRY, i;
ep = (DENTRY_T *)&(es->__buf);
for (i=0; i < es->num_entries; i++) {
MMSG ("%s %p\n", __func__, ep);
for (i = 0; i < es->num_entries; i++) {
MMSG("%s %p\n", __func__, ep);
chksum = calc_chksum_2byte((void *) ep, DENTRY_SIZE, chksum, chksum_type);
ep++;
chksum_type = CS_DEFAULT;
@ -524,10 +529,11 @@ s32 update_dir_chksum_with_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T
#define ES_MODE_GET_STRM_ENTRY 2
#define ES_MODE_GET_NAME_ENTRY 3
#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4
ENTRY_SET_CACHE_T *get_dentry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep)
ENTRY_SET_CACHE_T *get_dentry_set_in_dir(struct super_block *sb,
CHAIN_T *p_dir, s32 entry, u32 type, DENTRY_T **file_ep)
{
s32 off, ret, byte_offset;
u32 clu=0;
u32 clu = 0;
u32 sec, entry_type;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
ENTRY_SET_CACHE_T *es = NULL;
@ -593,15 +599,15 @@ ENTRY_SET_CACHE_T *get_dentry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir
pos = (DENTRY_T *) &(es->__buf);
while(num_entries) {
while (num_entries) {
// instead of copying whole sector, we will check every entry.
// this will provide minimum stablity and consistancy.
// this will provide minimum stablity and consistency.
entry_type = exfat_get_entry_type(ep);
if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
goto err_out;
switch(mode) {
switch (mode) {
case ES_MODE_STARTED:
if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR))
mode = ES_MODE_GET_FILE_ENTRY;
@ -644,8 +650,8 @@ ENTRY_SET_CACHE_T *get_dentry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir
if (--num_entries == 0)
break;
if ( ((off + DENTRY_SIZE) & (u32)(sb->s_blocksize - 1)) <
(off & (u32)(sb->s_blocksize - 1)) ) {
if (((off + DENTRY_SIZE) & (u32)(sb->s_blocksize - 1)) <
(off & (u32)(sb->s_blocksize - 1))) {
// get the next sector
if (IS_LAST_SECT_IN_CLUS(fsi, sec)) {
if (es->alloc_flag == 0x03)
@ -677,20 +683,20 @@ ENTRY_SET_CACHE_T *get_dentry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir
return es;
err_out:
TMSG("%s exited (return NULL) (es %p)\n", __func__, es);
if (es) {
kfree(es);
es = NULL;
}
/* kfree(NULL) is safe */
kfree(es);
es = NULL;
return NULL;
}
void release_dentry_set (ENTRY_SET_CACHE_T *es)
void release_dentry_set(ENTRY_SET_CACHE_T *es)
{
TMSG("%s %p\n", __func__, es);
if (es) {
kfree(es);
es = NULL;
}
/* kfree(NULL) is safe */
kfree(es);
es = NULL;
}
static s32 __extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, u16 *uniname, s32 order)
@ -701,14 +707,13 @@ static s32 __extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, u16 *uniname, s
/* FIXME : unaligned? */
*uniname = le16_to_cpu(ep->unicode_0_14[i]);
if (*uniname == 0x0)
return(len);
return len;
uniname++;
len++;
}
*uniname = 0x0;
return(len);
return len;
} /* end of __extract_uni_name_from_name_entry */
#define DIRENT_STEP_FILE (0)
@ -722,7 +727,8 @@ static s32 __extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, u16 *uniname, s
* -ENOENT : entry with the name does not exist
* -EIO : I/O error
*/
static s32 exfat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *unused, u32 type)
static s32 exfat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid,
CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *unused, u32 type)
{
s32 i, rewind = 0, dentry = 0, end_eidx = 0, num_ext = 0, len;
s32 order, step, name_len;
@ -909,6 +915,7 @@ found:
/* next dentry we'll find is out of this cluster */
if (!((dentry + 1) & (dentries_per_clu-1))) {
int ret = 0;
if (clu.flags == 0x03) {
if ((--clu.size) > 0)
clu.dir++;
@ -945,11 +952,10 @@ static s32 exfat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, s32 e
return -EIO;
type = exfat_get_entry_type(ext_ep);
if ((type == TYPE_EXTEND) || (type == TYPE_STREAM)) {
if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
count++;
} else {
else
return count;
}
}
return count;
@ -1006,6 +1012,15 @@ static s32 exfat_calc_num_entries(UNI_NAME_T *p_uniname)
} /* end of exfat_calc_num_entries */
static s32 exfat_check_max_dentries(FILE_ID_T *fid)
{
if ((fid->size >> DENTRY_SIZE_BITS) >= MAX_EXFAT_DENTRIES) {
/* exFAT spec allows a dir to grow upto 8388608(256MB) dentries */
return -ENOSPC;
}
return 0;
} /* end of check_max_dentries */
/*
* Allocation Bitmap Management Functions
@ -1013,7 +1028,7 @@ static s32 exfat_calc_num_entries(UNI_NAME_T *p_uniname)
s32 load_alloc_bmp(struct super_block *sb)
{
s32 i, j, ret;
u32 map_size;
u32 map_size, need_map_size;
u32 type, sector;
CHAIN_T clu;
BMAP_DENTRY_T *ep;
@ -1039,8 +1054,18 @@ s32 load_alloc_bmp(struct super_block *sb)
fsi->map_clu = le32_to_cpu(ep->start_clu);
map_size = (u32) le64_to_cpu(ep->size);
fsi->map_sectors = ((map_size-1) >> (sb->s_blocksize_bits)) + 1;
fsi->vol_amap = (struct buffer_head **) kmalloc((sizeof(struct buffer_head *) * fsi->map_sectors), GFP_KERNEL);
need_map_size = (((fsi->num_clusters - CLUS_BASE) - 1) >> 3) + 1;
if (need_map_size != map_size) {
sdfat_log_msg(sb, KERN_ERR,
"bogus allocation bitmap size(need : %u, cur : %u)",
need_map_size, map_size);
/* Only allowed when bogus allocation bitmap size is large */
if (need_map_size > map_size)
return -EIO;
}
fsi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1;
fsi->vol_amap =
kmalloc((sizeof(struct buffer_head *) * fsi->map_sectors), GFP_KERNEL);
if (!fsi->vol_amap)
return -ENOMEM;
@ -1051,14 +1076,13 @@ s32 load_alloc_bmp(struct super_block *sb)
ret = read_sect(sb, sector+j, &(fsi->vol_amap[j]), 1);
if (ret) {
/* release all buffers and free vol_amap */
i=0;
i = 0;
while (i < j)
brelse(fsi->vol_amap[i++]);
if (fsi->vol_amap) {
kfree(fsi->vol_amap);
fsi->vol_amap = NULL;
}
/* kfree(NULL) is safe */
kfree(fsi->vol_amap);
fsi->vol_amap = NULL;
return ret;
}
}
@ -1082,14 +1106,12 @@ void free_alloc_bmp(struct super_block *sb)
brelse(fsi->pbr_bh);
for (i = 0; i < fsi->map_sectors; i++) {
for (i = 0; i < fsi->map_sectors; i++)
__brelse(fsi->vol_amap[i]);
}
if(fsi->vol_amap) {
kfree(fsi->vol_amap);
fsi->vol_amap = NULL;
}
/* kfree(NULL) is safe */
kfree(fsi->vol_amap);
fsi->vol_amap = NULL;
}
/* WARN :
@ -1106,8 +1128,7 @@ static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
b = clu & (u32)((sb->s_blocksize << 3) - 1);
sector = CLUS_TO_SECT(fsi, fsi->map_clu) + i;
bitmap_set((unsigned long*)(fsi->vol_amap[i]->b_data), b, 1);
bitmap_set((unsigned long *)(fsi->vol_amap[i]->b_data), b, 1);
return write_sect(sb, sector, fsi->vol_amap[i], 0);
} /* end of set_alloc_bitmap */
@ -1130,12 +1151,13 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
sector = CLUS_TO_SECT(fsi, fsi->map_clu) + i;
bitmap_clear((unsigned long*)(fsi->vol_amap[i]->b_data), b, 1);
bitmap_clear((unsigned long *)(fsi->vol_amap[i]->b_data), b, 1);
ret = write_sect(sb, sector, fsi->vol_amap[i], 0);
if (opts->discard) {
s32 ret_discard;
TMSG("discard cluster(%08x)\n", clu+2);
ret_discard = sb_issue_discard(sb, CLUS_TO_SECT(fsi, clu+2),
(1 << fsi->sect_per_clus_bits), GFP_NOFS, 0);
@ -1254,7 +1276,7 @@ static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, CHAIN_T *p
ASSERT(0);
hint_clu = 2;
if (p_chain->flags == 0x03) {
if (exfat_chain_cont_cluster( sb, p_chain->dir, num_clusters))
if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters))
return -EIO;
p_chain->flags = 0x01;
}
@ -1265,8 +1287,8 @@ static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc, CHAIN_T *p
p_chain->dir = CLUS_EOF;
while ((new_clu = test_alloc_bitmap(sb, hint_clu-2)) != CLUS_EOF) {
if ( (new_clu != hint_clu) && (p_chain->flags == 0x03) ) {
if (exfat_chain_cont_cluster( sb, p_chain->dir, num_clusters))
if ((new_clu != hint_clu) && (p_chain->flags == 0x03)) {
if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters))
return -EIO;
p_chain->flags = 0x01;
}
@ -1395,7 +1417,7 @@ out:
return ret;
} /* end of exfat_free_cluster */
static s32 exfat_count_used_clusters(struct super_block *sb, u32* ret_count)
static s32 exfat_count_used_clusters(struct super_block *sb, u32 *ret_count)
{
u32 count = 0;
u32 i, map_i, map_b;
@ -1406,8 +1428,8 @@ static s32 exfat_count_used_clusters(struct super_block *sb, u32* ret_count)
for (i = 0; i < total_clus; i += 8) {
u8 k = *(((u8 *) fsi->vol_amap[map_i]->b_data) + map_b);
count += used_bit[k];
count += used_bit[k];
if ((++map_b) >= (u32)sb->s_blocksize) {
map_i++;
map_b = 0;
@ -1438,6 +1460,7 @@ static FS_FUNC_T exfat_fs_func = {
.get_uniname_from_ext_entry = exfat_get_uniname_from_ext_entry,
.count_ext_entries = exfat_count_ext_entries,
.calc_num_entries = exfat_calc_num_entries,
.check_max_dentries = exfat_check_max_dentries,
.get_entry_type = exfat_get_entry_type,
.set_entry_type = exfat_set_entry_type,
@ -1496,7 +1519,7 @@ s32 mount_exfat(struct super_block *sb, pbr_t *p_pbr)
fsi->fs_func = &exfat_fs_func;
fat_ent_ops_init(sb);
if (p_bpb->bsx.vol_flags & VOL_DIRTY) {
fsi->vol_flag |= VOL_DIRTY;
sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly "

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -141,7 +139,7 @@ static u32 __calc_default_au_size(struct super_block *sb)
out:
if (sb->s_blocksize != 512) {
ASSERT(sb->s_blocksize_bits > 9);
sdfat_log_msg(sb, KERN_INFO,
sdfat_log_msg(sb, KERN_INFO,
"adjustment est_au_size by logical block size(%lu)",
sb->s_blocksize);
est_au_sect >>= (sb->s_blocksize_bits - 9);
@ -250,26 +248,31 @@ static s32 fat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_rel
goto out;
}
}
prev = clu;
if (get_next_clus(sb, &clu))
goto out;
/* FAT validity check */
if (IS_CLUS_FREE(clu)) {
/* GRACEFUL ERROR HANDLING */
/* Broken FAT chain (Already FREE) */
sdfat_fs_error(sb, "%s : deleting FAT entry beyond EOF (clu[%u]->0)", __func__, prev);
if (get_next_clus_safe(sb, &clu)) {
/* print more helpful log */
if (IS_CLUS_BAD(clu)) {
sdfat_log_msg(sb, KERN_ERR, "%s : "
"deleting bad cluster (clu[%u]->BAD)",
__func__, prev);
} else if (IS_CLUS_FREE(clu)) {
sdfat_log_msg(sb, KERN_ERR, "%s : "
"deleting free cluster (clu[%u]->FREE)",
__func__, prev);
}
goto out;
}
/* Free FAT chain */
if (fat_ent_set(sb, prev, CLUS_FREE))
goto out;
/* Update AMAP if needed */
if (fsi->amap)
amap_release_cluster(sb, prev);
if (fsi->amap) {
if (amap_release_cluster(sb, prev))
return -EIO;
}
num_clusters++;
@ -283,7 +286,7 @@ out:
return ret;
} /* end of fat_free_cluster */
static s32 fat_count_used_clusters(struct super_block *sb, u32* ret_count)
static s32 fat_count_used_clusters(struct super_block *sb, u32 *ret_count)
{
s32 i;
u32 clu, count = 0;
@ -307,7 +310,7 @@ static s32 fat_count_used_clusters(struct super_block *sb, u32* ret_count)
*/
static u32 fat_get_entry_type(DENTRY_T *p_entry)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
/* first byte of 32bytes dummy */
if (*(ep->name) == MSDOS_UNUSED)
@ -335,7 +338,7 @@ static u32 fat_get_entry_type(DENTRY_T *p_entry)
static void fat_set_entry_type(DENTRY_T *p_entry, u32 type)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
if (type == TYPE_UNUSED)
*(ep->name) = MSDOS_UNUSED; /* 0x0 */
@ -358,14 +361,16 @@ static void fat_set_entry_type(DENTRY_T *p_entry, u32 type)
static u32 fat_get_entry_attr(DENTRY_T *p_entry)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
return((u32) ep->attr);
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
return (u32)ep->attr;
} /* end of fat_get_entry_attr */
static void fat_set_entry_attr(DENTRY_T *p_entry, u32 attr)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
ep->attr = (u8) attr;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
ep->attr = (u8)attr;
} /* end of fat_set_entry_attr */
static u8 fat_get_entry_flag(DENTRY_T *p_entry)
@ -379,27 +384,30 @@ static void fat_set_entry_flag(DENTRY_T *p_entry, u8 flags)
static u32 fat_get_entry_clu0(DENTRY_T *p_entry)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
/* FIXME : is ok? */
return(((u32)(le16_to_cpu(ep->start_clu_hi)) << 16) | le16_to_cpu(ep->start_clu_lo));
} /* end of fat_get_entry_clu0 */
static void fat_set_entry_clu0(DENTRY_T *p_entry, u32 start_clu)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
ep->start_clu_lo = cpu_to_le16(CLUSTER_16(start_clu));
ep->start_clu_hi = cpu_to_le16(CLUSTER_16(start_clu >> 16));
} /* end of fat_set_entry_clu0 */
static u64 fat_get_entry_size(DENTRY_T *p_entry)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
return((u64) le32_to_cpu(ep->size));
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
return (u64)le32_to_cpu(ep->size);
} /* end of fat_get_entry_size */
static void fat_set_entry_size(DENTRY_T *p_entry, u64 size)
{
DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry;
DOS_DENTRY_T *ep = (DOS_DENTRY_T *)p_entry;
ep->size = cpu_to_le32((u32)size);
} /* end of fat_set_entry_size */
@ -488,7 +496,7 @@ static void __init_ext_entry(EXT_DENTRY_T *ep, s32 order, u8 chksum, u16 *uninam
}
/* aligned name */
for (i = 0; i < 6; i ++) {
for (i = 0; i < 6; i++) {
if (!end) {
ep->unicode_5_10[i] = cpu_to_le16(*uniname);
if (*uniname == 0x0)
@ -608,7 +616,8 @@ static inline s32 __get_dentries_per_clu(FS_INFO_T *fsi, s32 clu)
return fsi->dentries_per_clu;
}
static s32 fat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *p_dosname, u32 type)
static s32 fat_find_dir_entry(struct super_block *sb, FILE_ID_T *fid,
CHAIN_T *p_dir, UNI_NAME_T *p_uniname, s32 num_entries, DOS_NAME_T *p_dosname, u32 type)
{
s32 i, rewind = 0, dentry = 0, end_eidx = 0;
s32 chksum = 0, lfn_ord = 0, lfn_len = 0;
@ -685,8 +694,8 @@ rewind:
}
/* invalid lfn order */
if ( !cur_ord || (cur_ord > MAX_LFN_ORDER) ||
((cur_ord + 1) != lfn_ord) )
if (!cur_ord || (cur_ord > MAX_LFN_ORDER) ||
((cur_ord + 1) != lfn_ord))
goto reset_dentry_set;
/* check checksum of directory entry set */
@ -702,7 +711,7 @@ rewind:
continue;
}
if(!uniname) {
if (!uniname) {
sdfat_fs_error(sb,
"%s : abnormal dentry "
"(start_clu[%u], "
@ -760,16 +769,16 @@ rewind:
*/
if (!lfn_len || (cur_chksum != chksum)) {
/* check shortname */
if ( (p_dosname->name[0] != '\0') &&
if ((p_dosname->name[0] != '\0') &&
!nls_cmp_sfn(sb,
p_dosname->name,
dos_ep->name) ) {
dos_ep->name)) {
goto found;
}
/* check name length */
} else if ( (lfn_len > 0) &&
} else if ((lfn_len > 0) &&
((s32)p_uniname->name_len ==
lfn_len) ) {
lfn_len)) {
goto found;
}
@ -872,12 +881,12 @@ static s32 fat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, s32 ent
chksum = calc_chksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0);
for (entry--; entry >= 0; entry--) {
ext_ep = (EXT_DENTRY_T*)get_dentry_in_dir(sb,p_dir,entry,NULL);
ext_ep = (EXT_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, NULL);
if (!ext_ep)
return -EIO;
if ( (fat_get_entry_type((DENTRY_T*)ext_ep) == TYPE_EXTEND) &&
(ext_ep->checksum == chksum) ) {
if ((fat_get_entry_type((DENTRY_T *)ext_ep) == TYPE_EXTEND) &&
(ext_ep->checksum == chksum)) {
count++;
if (ext_ep->order > MSDOS_LAST_LFN)
return count;
@ -900,7 +909,7 @@ static s32 __extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, u16 *uniname, s32
for (i = 0; i < 5; i++) {
*uniname = get_unaligned_le16(&(ep->unicode_0_4[i<<1]));
if (*uniname == 0x0)
return(len);
return len;
uniname++;
len++;
}
@ -910,7 +919,7 @@ static s32 __extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, u16 *uniname, s32
/* FIXME : unaligned? */
*uniname = le16_to_cpu(ep->unicode_5_10[i]);
if (*uniname == 0x0)
return(len);
return len;
uniname++;
len++;
}
@ -919,25 +928,25 @@ static s32 __extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, u16 *uniname, s32
/* FIXME : unaligned? */
*uniname = le16_to_cpu(ep->unicode_5_10[i]);
if (*uniname == 0x0)
return(len);
return len;
uniname++;
len++;
}
*uniname = 0x0; /* uniname[MAX_NAME_LENGTH] */
return(len);
return len;
}
for (i = 0; i < 2; i++) {
/* FIXME : unaligned? */
*uniname = le16_to_cpu(ep->unicode_11_12[i]);
if (*uniname == 0x0)
return(len);
return len;
uniname++;
len++;
}
*uniname = 0x0;
return(len);
return len;
} /* end of __extract_uni_name_from_ext_entry */
@ -959,6 +968,7 @@ static void fat_get_uniname_from_ext_entry(struct super_block *sb, CHAIN_T *p_di
for (entry--, i = 1; entry >= 0; entry--, i++) {
EXT_DENTRY_T *ep;
ep = (EXT_DENTRY_T *)get_dentry_in_dir(sb, p_dir, entry, NULL);
if (!ep)
goto invalid_lfn;
@ -980,13 +990,13 @@ static void fat_get_uniname_from_ext_entry(struct super_block *sb, CHAIN_T *p_di
}
invalid_lfn:
*uniname = (u16)0x0;
return;
} /* end of fat_get_uniname_from_ext_entry */
/* Find if the shortname exists
and check if there are free entries
*/
static s32 __fat_find_shortname_entry(struct super_block *sb, CHAIN_T *p_dir, u8 *p_dosname, s32 *offset, __attribute__((unused))int n_entry_needed)
* and check if there are free entries
*/
static s32 __fat_find_shortname_entry(struct super_block *sb, CHAIN_T *p_dir,
u8 *p_dosname, s32 *offset, __attribute__((unused))int n_entry_needed)
{
u32 type;
s32 i, dentry = 0;
@ -1004,7 +1014,7 @@ static s32 __fat_find_shortname_entry(struct super_block *sb, CHAIN_T *p_dir, u8
else
dentries_per_clu = fsi->dentries_per_clu;
while(!IS_CLUS_EOF(clu.dir)) {
while (!IS_CLUS_EOF(clu.dir)) {
for (i = 0; i < dentries_per_clu; i++, dentry++) {
ep = get_dentry_in_dir(sb, &clu, i, NULL);
if (!ep)
@ -1072,8 +1082,9 @@ s32 fat_generate_dos_name_new(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T
memset(work, ' ', DOS_NAME_LENGTH);
memcpy(work, p_dosname->name, DOS_NAME_LENGTH);
while(baselen && (work[--baselen] == ' '));
while (baselen && (work[--baselen] == ' ')) {
/* DO NOTHING, JUST FOR CHECK_PATCH */
}
if (baselen > 6)
baselen = 6;
@ -1091,7 +1102,7 @@ s32 fat_generate_dos_name_new(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T
/* void return */
__fat_attach_count_to_dos_name(p_dosname->name, i);
return 0;
}
}
/* any other error */
if (err)
@ -1141,6 +1152,14 @@ static s32 fat_calc_num_entries(UNI_NAME_T *p_uniname)
} /* end of calc_num_enties */
static s32 fat_check_max_dentries(FILE_ID_T *fid)
{
if ((fid->size >> DENTRY_SIZE_BITS) >= MAX_FAT_DENTRIES) {
/* FAT spec allows a dir to grow upto 65536 dentries */
return -ENOSPC;
}
return 0;
} /* end of check_max_dentries */
/*
@ -1158,6 +1177,7 @@ static FS_FUNC_T fat_fs_func = {
.get_uniname_from_ext_entry = fat_get_uniname_from_ext_entry,
.count_ext_entries = fat_count_ext_entries,
.calc_num_entries = fat_calc_num_entries,
.check_max_dentries = fat_check_max_dentries,
.get_entry_type = fat_get_entry_type,
.set_entry_type = fat_set_entry_type,
@ -1185,6 +1205,7 @@ static FS_FUNC_T amap_fat_fs_func = {
.get_uniname_from_ext_entry = fat_get_uniname_from_ext_entry,
.count_ext_entries = fat_count_ext_entries,
.calc_num_entries = fat_calc_num_entries,
.check_max_dentries = fat_check_max_dentries,
.get_entry_type = fat_get_entry_type,
.set_entry_type = fat_set_entry_type,
@ -1348,7 +1369,7 @@ s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr)
fsi->num_clusters = ((fsi->num_sectors-num_reserved) >> fsi->sect_per_clus_bits) + 2;
/* because the cluster index starts with 2 */
fsi->vol_type = FAT32;
fsi->vol_id = get_unaligned_le32(p_bpb->bsx.vol_serial);
@ -1382,23 +1403,23 @@ s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr)
"sector : bpb(%u) != ondisk(%u)",
hidden_sectors, calc_hid_sect);
if (SDFAT_SB(sb)->options.adj_hidsect) {
sdfat_log_msg(sb, KERN_INFO,
sdfat_log_msg(sb, KERN_INFO,
"adjustment hidden sector : "
"bpb(%u) -> ondisk(%u)",
"bpb(%u) -> ondisk(%u)",
hidden_sectors, calc_hid_sect);
hidden_sectors = calc_hid_sect;
}
}
SDFAT_SB(sb)->options.amap_opt.misaligned_sect = hidden_sectors;
/* calculate AU size if it's not set */
if (!SDFAT_SB(sb)->options.amap_opt.sect_per_au) {
SDFAT_SB(sb)->options.amap_opt.sect_per_au =
SDFAT_SB(sb)->options.amap_opt.sect_per_au =
__calc_default_au_size(sb);
}
ret = amap_create(sb,
ret = amap_create(sb,
SDFAT_SB(sb)->options.amap_opt.pack_ratio,
SDFAT_SB(sb)->options.amap_opt.sect_per_au,
SDFAT_SB(sb)->options.amap_opt.misaligned_sect);
@ -1412,8 +1433,8 @@ s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr)
}
/* Check dependency of mount options */
if (SDFAT_SB(sb)->options.improved_allocation !=
(SDFAT_ALLOC_DELAY | SDFAT_ALLOC_SMART) ) {
if (SDFAT_SB(sb)->options.improved_allocation !=
(SDFAT_ALLOC_DELAY | SDFAT_ALLOC_SMART)) {
sdfat_log_msg(sb, KERN_INFO, "disabling defragmentation because"
" smart, delay options are disabled");
SDFAT_SB(sb)->options.defrag = 0;

144
dfr.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -62,7 +60,7 @@ defrag_get_info(
arg->total_sec = fsi->num_sectors;
arg->fat_offset_sec = fsi->FAT1_start_sector;
arg->fat_sz_sec = fsi->num_FAT_sectors;
arg->n_fat = (fsi->FAT1_start_sector == fsi->FAT2_start_sector)? 1:2;
arg->n_fat = (fsi->FAT1_start_sector == fsi->FAT2_start_sector) ? 1 : 2;
arg->sec_per_au = amap->option.au_size;
arg->hidden_sectors = amap->option.au_align_factor % amap->option.au_size;
@ -139,7 +137,7 @@ error:
* @return 0 on success, -errno otherwise
* @param sb super block
* @param args traverse args
* @remark protected by i_mutex, super_block and volume lock
* @remark protected by inode_lock, super_block and volume lock
*/
int
defrag_scan_dir(
@ -149,7 +147,7 @@ defrag_scan_dir(
struct sdfat_sb_info *sbi = NULL;
FS_INFO_T *fsi = NULL;
struct defrag_trav_header *header = NULL;
IN DOS_DENTRY_T *dos_ep;
DOS_DENTRY_T *dos_ep;
CHAIN_T chain;
int dot_found = 0, args_idx = DFR_TRAV_HEADER_IDX + 1, clus = 0, index = 0;
int err = 0, j = 0;
@ -186,9 +184,9 @@ defrag_scan_dir(
}
/* For more-scan case */
if ((header->stat == DFR_TRAV_STAT_MORE) &&
(header->start_clus == sbi->dfr_hint_clus) &&
(sbi->dfr_hint_idx > 0) ) {
if ((header->stat == DFR_TRAV_STAT_MORE) &&
(header->start_clus == sbi->dfr_hint_clus) &&
(sbi->dfr_hint_idx > 0)) {
index = sbi->dfr_hint_idx;
for (j = 0; j < (sbi->dfr_hint_idx / fsi->dentries_per_clu); j++) {
@ -236,7 +234,7 @@ scan_fat_chain:
err = __defrag_scan_dir(sb, dos_ep, i_pos, &args[args_idx]);
if (!err) {
/* More-scan case */
if ( ++args_idx >= (PAGE_SIZE / sizeof(struct defrag_trav_arg)) ) {
if (++args_idx >= (PAGE_SIZE / sizeof(struct defrag_trav_arg))) {
sbi->dfr_hint_clus = header->start_clus;
sbi->dfr_hint_idx = clus * fsi->dentries_per_clu + index + 1;
@ -252,10 +250,10 @@ scan_fat_chain:
/* End case */
} else if (err == -ENOENT) {
sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
err = 0;
err = 0;
goto done;
} else {
;
/* DO NOTHING */
}
err = 0;
}
@ -299,7 +297,7 @@ __defrag_validate_cluster_prev(
dir.flags = 0x1; // Assume non-continuous
entry = GET64_LO(chunk->i_pos);
FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
ERR_HANDLE(err);
ep = get_dentry_in_dir(sb, &dir, entry, NULL);
@ -369,7 +367,7 @@ __defrag_check_au(
{
unsigned int nr_free = amap_get_freeclus(sb, clus);
#if defined(CONFIG_SDFAT_DFR_DEBUG) || defined(CONFIG_SDFAT_DBG_MSG)
#if defined(CONFIG_SDFAT_DFR_DEBUG) && defined(CONFIG_SDFAT_DBG_MSG)
if (nr_free < limit) {
AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
AU_INFO_T *au = GET_AU(amap, i_AU_of_CLU(amap, clus));
@ -377,8 +375,7 @@ __defrag_check_au(
dfr_debug("AU[%d] nr_free %d, limit %d", au->idx, nr_free, limit);
}
#endif
return ((nr_free < limit)? 1 : 0);
return ((nr_free < limit) ? 1 : 0);
}
@ -406,15 +403,15 @@ defrag_validate_cluster(
if (fid->dir.dir == DIR_DELETED)
return -ENOENT;
/* Skip working-AU */
err = amap_check_working(sb, chunk->d_clus);
if (err)
return -EBUSY;
/* Skip working-AU */
err = amap_check_working(sb, chunk->d_clus);
if (err)
return -EBUSY;
/* Check # of free_clus of belonged AU */
err = __defrag_check_au(inode->i_sb, chunk->d_clus, CLUS_PER_AU(sb) - chunk->au_clus);
if (err)
return -EINVAL;
err = __defrag_check_au(inode->i_sb, chunk->d_clus, CLUS_PER_AU(sb) - chunk->au_clus);
if (err)
return -EINVAL;
/* Check chunk's clusters */
for (i = 0; i < chunk->nr_clus; i++) {
@ -457,7 +454,7 @@ defrag_reserve_clusters(
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
FS_INFO_T *fsi = &(sbi->fsi);
if ( !(sbi->options.improved_allocation & SDFAT_ALLOC_DELAY) )
if (!(sbi->options.improved_allocation & SDFAT_ALLOC_DELAY))
/* Nothing to do */
return 0;
@ -519,8 +516,6 @@ defrag_unmark_ignore_all(struct super_block *sb)
{
if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART)
amap_unmark_ignore_all(sb);
return;
}
@ -535,7 +530,7 @@ defrag_unmark_ignore_all(struct super_block *sb)
*/
int
defrag_map_cluster(
struct inode *inode,
struct inode *inode,
unsigned int clu_offset,
unsigned int *clu)
{
@ -596,11 +591,10 @@ defrag_map_cluster(
/* Make FAT-chain for new_clus */
for (i = 0; i < chunk->nr_clus; i++) {
#if 0
if (sbi->dfr_new_clus[chunk->new_idx + i]) {
if (sbi->dfr_new_clus[chunk->new_idx + i])
nr_new++;
} else {
else
break;
}
#else
if (!sbi->dfr_new_clus[chunk->new_idx + i])
break;
@ -611,7 +605,7 @@ defrag_map_cluster(
for (i = 0; i < chunk->nr_clus - 1; i++) {
FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
BUG_ON(err);
if (fat_ent_set(sb,
if (fat_ent_set(sb,
sbi->dfr_new_clus[chunk->new_idx + i],
sbi->dfr_new_clus[chunk->new_idx + i + 1]))
return -EIO;
@ -654,11 +648,11 @@ defrag_writepage_end_io(
chunk_start = chunk->f_clus;
chunk_end = chunk->f_clus + chunk->nr_clus;
if ( (clus_start >= chunk_start) && (clus_end <= chunk_end) ) {
if ((clus_start >= chunk_start) && (clus_end <= chunk_end)) {
int off = clus_start - chunk_start;
clear_bit( (page->index & (PAGES_PER_CLUS(sb) - 1)),
(volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + off]) );
clear_bit((page->index & (PAGES_PER_CLUS(sb) - 1)),
(volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + off]));
}
}
}
@ -683,7 +677,7 @@ __defrag_check_wb(
/* Check WB complete status first */
for (wb_i = 0; wb_i < chunk->nr_clus; wb_i++) {
if ( atomic_read((atomic_t *)&(sbi->dfr_page_wb[chunk->new_idx + wb_i])) ) {
if (atomic_read((atomic_t *)&(sbi->dfr_page_wb[chunk->new_idx + wb_i]))) {
err = -EBUSY;
break;
}
@ -700,7 +694,7 @@ __defrag_check_wb(
if (nr_new == chunk->nr_clus) {
err = 0;
if ( (wb_i != chunk->nr_clus) && (wb_i != chunk->nr_clus - 1) )
if ((wb_i != chunk->nr_clus) && (wb_i != chunk->nr_clus - 1))
dfr_debug("submit_fullpage_bio() called on a page (nr_clus %d, wb_i %d)",
chunk->nr_clus, wb_i);
@ -741,9 +735,9 @@ __defrag_check_fat_old(
err = fat_ent_get(sb, clus, &clus);
ERR_HANDLE(err);
if ( (idx < max_idx - 1) && (IS_CLUS_EOF(clus) || IS_CLUS_FREE(clus)) ) {
if ((idx < max_idx - 1) && (IS_CLUS_EOF(clus) || IS_CLUS_FREE(clus))) {
dfr_err("FAT: inode %p, max_idx %d, idx %d, clus %08x, "
"f_clus %d, nr_clus %d", inode, max_idx,
"f_clus %d, nr_clus %d", inode, max_idx,
idx, clus, chunk->f_clus, chunk->nr_clus);
BUG_ON(idx < max_idx - 1);
goto error;
@ -804,14 +798,13 @@ __defrag_check_fat_new(
BUG_ON(err);
err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], &clus);
BUG_ON(err);
if ( (chunk->next_clus & 0x0FFFFFFF) != (clus & 0x0FFFFFFF) ) {
if ((chunk->next_clus & 0x0FFFFFFF) != (clus & 0x0FFFFFFF)) {
dfr_err("FAT: inode %p, next_clus %08x, read_clus %08x", inode, chunk->next_clus, clus);
err = EIO;
}
error:
BUG_ON(err);
return;
}
@ -872,7 +865,7 @@ defrag_update_fat_prev(
struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
int skip = 0, done = 0;
/* Check if FS_ERROR occured */
/* Check if FS_ERROR occurred */
if (sb->s_flags & MS_RDONLY) {
dfr_err("RDONLY partition (err %d)", -EPERM);
goto out;
@ -885,7 +878,7 @@ defrag_update_fat_prev(
int i = 0, j = 0;
mutex_lock(&ino_dfr->lock);
BUG_ON (atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ);
BUG_ON(atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ);
for (i = 0; i < ino_dfr->nr_chunks; i++) {
struct defrag_chunk_info *chunk = NULL;
int err = 0;
@ -906,15 +899,16 @@ defrag_update_fat_prev(
}
/* Double-check clusters */
if ( chunk_prev &&
if (chunk_prev &&
(chunk->f_clus == chunk_prev->f_clus + chunk_prev->nr_clus) &&
(chunk_prev->stat == DFR_CHUNK_STAT_PASS) ) {
(chunk_prev->stat == DFR_CHUNK_STAT_PASS)) {
err = defrag_validate_cluster(inode, chunk, 1);
/* Handle continuous chunks in a file */
if (!err) {
chunk->prev_clus = sbi->dfr_new_clus[chunk_prev->new_idx + chunk_prev->nr_clus - 1];
chunk->prev_clus =
sbi->dfr_new_clus[chunk_prev->new_idx + chunk_prev->nr_clus - 1];
dfr_debug("prev->f_clus %d, prev->nr_clus %d, chunk->f_clus %d",
chunk_prev->f_clus, chunk_prev->nr_clus, chunk->f_clus);
}
@ -932,7 +926,7 @@ defrag_update_fat_prev(
* Skip update_fat_prev if WB or update_fat_next not completed.
* Go to error case if FORCE set.
*/
if ( __defrag_check_wb(sbi, chunk) || (chunk->stat != DFR_CHUNK_STAT_PREP) ) {
if (__defrag_check_wb(sbi, chunk) || (chunk->stat != DFR_CHUNK_STAT_PREP)) {
if (force) {
err = -EPERM;
dfr_err("Skip case: inode %p, stat %x, f_clus %d, err %d",
@ -1014,13 +1008,13 @@ error:
out:
if (skip) {
dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
__func__, sb_dfr->nr_chunks - 1, done, skip);
} else {
/* Make dfr_reserved_clus zero */
if (sbi->dfr_reserved_clus > 0) {
if (fsi->reserved_clusters < sbi->dfr_reserved_clus) {
dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
fsi->reserved_clusters, sbi->dfr_reserved_clus);
BUG_ON(fsi->reserved_clusters < sbi->dfr_reserved_clus);
}
@ -1050,7 +1044,7 @@ defrag_update_fat_next(
struct defrag_chunk_info *chunk = NULL;
int done = 0, i = 0, j = 0, err = 0;
/* Check if FS_ERROR occured */
/* Check if FS_ERROR occurred */
if (sb->s_flags & MS_RDONLY) {
dfr_err("RDONLY partition (err %d)", -EROFS);
goto out;
@ -1063,7 +1057,7 @@ defrag_update_fat_next(
chunk = &(ino_dfr->chunks[i]);
/* Do nothing if error occured or update_fat_next already passed */
/* Do nothing if error occurred or update_fat_next already passed */
if (chunk->stat == DFR_CHUNK_STAT_ERR)
continue;
if (chunk->stat & DFR_CHUNK_STAT_FAT) {
@ -1083,7 +1077,7 @@ defrag_update_fat_next(
/* Update chunk's next cluster */
FAT32_CHECK_CLUSTER(fsi,
sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
BUG_ON(err);
if (fat_ent_set(sb,
sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1],
@ -1126,8 +1120,8 @@ defrag_check_discard(
BUG_ON(!amap);
if ( !(SDFAT_SB(sb)->options.discard) ||
!(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART) )
if (!(SDFAT_SB(sb)->options.discard) ||
!(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART))
return;
memset(tmp, 0, sizeof(int) * DFR_MAX_AU_MOVED);
@ -1139,8 +1133,8 @@ defrag_check_discard(
au = GET_AU(amap, i_AU_of_CLU(amap, chunk->d_clus));
/* Send DISCARD for free AU */
if ( (IS_AU_IGNORED(au, amap)) &&
(amap_get_freeclus(sb, chunk->d_clus) == CLUS_PER_AU(sb)) ) {
if ((IS_AU_IGNORED(au, amap)) &&
(amap_get_freeclus(sb, chunk->d_clus) == CLUS_PER_AU(sb))) {
sector_t blk = 0, nr_blks = 0;
unsigned int au_align_factor = amap->option.au_align_factor % amap->option.au_size;
@ -1157,11 +1151,11 @@ defrag_check_discard(
continue;
/* Send DISCARD cmd */
blk = (sector_t) ( ((au->idx * CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits)
- au_align_factor );
blk = (sector_t) (((au->idx * CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits)
- au_align_factor);
nr_blks = ((sector_t)CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits;
dfr_debug("Send DISCARD for AU[%d] (blk %08llx)", au->idx, blk);
dfr_debug("Send DISCARD for AU[%d] (blk %08zx)", au->idx, blk);
sb_issue_discard(sb, blk, nr_blks, GFP_NOFS, 0);
/* Save previous AU's index */
@ -1204,7 +1198,7 @@ defrag_free_cluster(
dfr_err("Free: Already freed, clus %08x, val %08x", clus, val);
BUG_ON(!val);
}
set_sb_dirty(sb);
fsi->used_clusters--;
if (fsi->amap)
@ -1236,14 +1230,14 @@ defrag_check_defrag_required(
int clean_ratio = 0, frag_ratio = 0;
int ret = 0;
if( !(sb) || !(SDFAT_SB(sb)->options.defrag) )
if (!sb || !(SDFAT_SB(sb)->options.defrag))
return 0;
/* Check DFR_DEFAULT_STOP_RATIO first */
fsi = &(SDFAT_SB(sb)->fsi);
if (fsi->used_clusters == (unsigned int)(~0)) {
if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
return -EIO;
if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
return -EIO;
}
if (fsi->used_clusters * DFR_FULL_RATIO >= fsi->num_clusters * DFR_DEFAULT_STOP_RATIO) {
dfr_debug("used_clusters %d, num_clusters %d", fsi->used_clusters, fsi->num_clusters);
@ -1262,12 +1256,11 @@ defrag_check_defrag_required(
(fsi->used_clusters * CLUS_PER_AU(sb));
/*
* Wake-up defrag_daemon
* when # of clean AUs too small,
* or frag_ratio exceeds the limit
* Wake-up defrag_daemon:
* when # of clean AUs too small, or frag_ratio exceeds the limit
*/
if ( (clean_ratio < DFR_DEFAULT_WAKEUP_RATIO) ||
((clean_ratio < DFR_DEFAULT_CLEAN_RATIO) && (frag_ratio >= DFR_DEFAULT_FRAG_RATIO)) ) {
if ((clean_ratio < DFR_DEFAULT_WAKEUP_RATIO) ||
((clean_ratio < DFR_DEFAULT_CLEAN_RATIO) && (frag_ratio >= DFR_DEFAULT_FRAG_RATIO))) {
if (totalau)
*totalau = amap->n_au;
@ -1315,8 +1308,8 @@ defrag_check_defrag_on(
if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) {
clus_start = start >> (fsi->cluster_size_bits);
clus_end = (end >> (fsi->cluster_size_bits)) +
((end & (fsi->cluster_size - 1))? 1:0);
clus_end = (end >> (fsi->cluster_size_bits)) +
((end & (fsi->cluster_size - 1)) ? 1 : 0);
if (!ino_dfr->chunks)
goto error;
@ -1326,18 +1319,17 @@ defrag_check_defrag_on(
struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
unsigned int chunk_start = 0, chunk_end = 0;
/* Skip this chunk when error occured or it already passed defrag process */
/* Skip this chunk when error occurred or it already passed defrag process */
if ((chunk->stat == DFR_CHUNK_STAT_ERR) || (chunk->stat == DFR_CHUNK_STAT_PASS))
continue;
chunk_start = chunk->f_clus;
chunk_end = chunk->f_clus + chunk->nr_clus;
if ( ((clus_start >= chunk_start) && (clus_start < chunk_end)) ||
if (((clus_start >= chunk_start) && (clus_start < chunk_end)) ||
((clus_end > chunk_start) && (clus_end <= chunk_end)) ||
((clus_start < chunk_start) && (clus_end > chunk_end)) ) {
ret = 1;
((clus_start < chunk_start) && (clus_end > chunk_end))) {
ret = 1;
if (cancel) {
chunk->stat = DFR_CHUNK_STAT_ERR;
dfr_debug("Defrag canceled: inode %p, start %08x, end %08x, caller %s",
@ -1371,15 +1363,13 @@ defrag_spo_test(
{
struct sdfat_sb_info *sbi = SDFAT_SB(sb);
if( !(sb) || !(SDFAT_SB(sb)->options.defrag) )
if (!sb || !(SDFAT_SB(sb)->options.defrag))
return;
if (flag == sbi->dfr_spo_flag) {
dfr_err("Defrag SPO test (flag %d, caller %s)", flag, caller);
panic("Defrag SPO test");
}
return;
}
#endif /* CONFIG_SDFAT_DFR_DEBUG */

60
dfr.h
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_DEFRAG_H
@ -23,45 +21,47 @@
#ifdef CONFIG_SDFAT_DFR
/* Tuning parameters */
#define DFR_MIN_TIMEOUT (1 * HZ) // Minimum timeout for forced-sync
#define DFR_DEFAULT_TIMEOUT (10 * HZ) // Default timeout for forced-sync
#define DFR_MIN_TIMEOUT (1 * HZ) // Minimum timeout for forced-sync
#define DFR_DEFAULT_TIMEOUT (10 * HZ) // Default timeout for forced-sync
#define DFR_DEFAULT_CLEAN_RATIO (50) // Wake-up daemon when clean AU ratio under 50%
#define DFR_DEFAULT_WAKEUP_RATIO (10) // Wake-up daemon when clean AU ratio under 10%, regardless of frag_ratio
#define DFR_DEFAULT_CLEAN_RATIO (50) // Wake-up daemon when clean AU ratio under 50%
#define DFR_DEFAULT_WAKEUP_RATIO (10) // Wake-up daemon when clean AU ratio under 10%, regardless of frag_ratio
#define DFR_DEFAULT_FRAG_RATIO (130) // Wake-up daemon when frag_ratio over 130%
#define DFR_DEFAULT_FRAG_RATIO (130) // Wake-up daemon when frag_ratio over 130%
#define DFR_DEFAULT_PACKING_RATIO (10) // Call allocator with PACKING flag, when clean AU ratio under 10%
#define DFR_DEFAULT_PACKING_RATIO (10) // Call allocator with PACKING flag, when clean AU ratio under 10%
#define DFR_DEFAULT_STOP_RATIO (98) // Stop defrag_daemon when disk used ratio over 98%
#define DFR_FULL_RATIO (100)
#define DFR_DEFAULT_STOP_RATIO (98) // Stop defrag_daemon when disk used ratio over 98%
#define DFR_FULL_RATIO (100)
#define DFR_MAX_AU_MOVED (16) // Maximum # of AUs for a request
#define DFR_MAX_AU_MOVED (16) // Maximum # of AUs for a request
/* Debugging support*/
#define dfr_err(fmt, args...) EMSG("DFR: " fmt "\n", args)
#define dfr_err(fmt, args...) pr_err("DFR: " fmt "\n", args)
#ifdef CONFIG_SDFAT_DFR_DEBUG
#define dfr_debug(fmt, args...) DMSG("DFR: " fmt "\n", args)
#define dfr_debug(fmt, args...) pr_debug("DFR: " fmt "\n", args)
#else
#define dfr_debug(fmt, args...)
#endif
/* Error handling */
#define ERR_HANDLE(err) \
if (err) { \
dfr_debug("err %d", err); \
goto error; \
}
#define ERR_HANDLE(err) { \
if (err) { \
dfr_debug("err %d", err); \
goto error; \
} \
}
#define ERR_HANDLE2(cond, err, val) \
if (cond) { \
err = val; \
dfr_debug("err %d", err); \
goto error; \
}
#define ERR_HANDLE2(cond, err, val) { \
if (cond) { \
err = val; \
dfr_debug("err %d", err); \
goto error; \
} \
}
/* Arguments IN-OUT */
@ -91,16 +91,16 @@
(SDFAT_SB(sb)->options.amap_opt.sect_per_au) >> (SDFAT_SB(sb)->fsi.sect_per_clus_bits) \
)
#define PAGES_PER_AU(sb) ( \
( (SDFAT_SB(sb)->options.amap_opt.sect_per_au) << ((sb)->s_blocksize_bits) ) \
((SDFAT_SB(sb)->options.amap_opt.sect_per_au) << ((sb)->s_blocksize_bits)) \
>> PAGE_SHIFT \
)
#define PAGES_PER_CLUS(sb) ((SDFAT_SB(sb)->fsi.cluster_size) >> PAGE_SHIFT)
#define FAT32_CHECK_CLUSTER(fsi, clus, err) \
{ \
if ( ((clus) < FAT32_UNUSED_CLUS) || \
if (((clus) < FAT32_UNUSED_CLUS) || \
((clus) > (fsi)->num_clusters) || \
((clus) >= FAT32_RESERVED) ) { \
((clus) >= FAT32_RESERVED)) { \
dfr_err("clus %08x, fsi->num_clusters %08x", (clus), (fsi)->num_clusters); \
err = -EINVAL; \
} else { \
@ -143,7 +143,7 @@ struct defrag_trav_arg {
char dummy1;
int dummy2;
};
#define DFR_TRAV_STAT_DONE (0x1)
#define DFR_TRAV_STAT_MORE (0x2)
#define DFR_TRAV_STAT_ERR (0xFF)
@ -156,7 +156,7 @@ struct defrag_trav_header {
char stat;
unsigned int nr_entries;
};
/* IOC_DFR_REQ */
#define REQ_HEADER_IDX (0)

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@ -272,7 +270,7 @@ static inline void cache_init(struct extent_cache_id *cid, s32 fclus, u32 dclus)
}
s32 extent_get_clus(struct inode *inode, s32 cluster, s32 *fclus,
u32 *dclus, u32 *last_dclus, s32 allow_eof)
u32 *dclus, u32 *last_dclus, s32 allow_eof)
{
struct super_block *sb = inode->i_sb;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
@ -347,11 +345,11 @@ s32 extent_get_clus(struct inode *inode, s32 cluster, s32 *fclus,
break;
}
if (!cache_contiguous(&cid, *dclus))
cache_init(&cid, *fclus, *dclus);
}
extent_cache_add(inode, &cid);
return 0;
}

127
fatent.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -53,7 +51,7 @@
static s32 exfat_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
u32 sec, off, _content;
u8 *fat_sector;
u8 *fat_sector;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
/* fsi->vol_type == EXFAT */
@ -64,12 +62,11 @@ static s32 exfat_ent_get(struct super_block *sb, u32 loc, u32 *content)
if (!fat_sector)
return -EIO;
_content = le32_to_cpu(*(__le32*)(&fat_sector[off]));
if (_content >= CLUSTER_32(0xFFFFFFF8)) {
//return 0xFFFFFFFF to simplify code
*content = CLUS_EOF;
return 0;
}
_content = le32_to_cpu(*(__le32 *)(&fat_sector[off]));
/* remap reserved clusters to simplify code */
if (_content >= CLUSTER_32(0xFFFFFFF8))
_content = CLUS_EOF;
*content = CLUSTER_32(_content);
return 0;
@ -78,7 +75,7 @@ static s32 exfat_ent_get(struct super_block *sb, u32 loc, u32 *content)
static s32 exfat_ent_set(struct super_block *sb, u32 loc, u32 content)
{
u32 sec, off;
u8 *fat_sector;
u8 *fat_sector;
__le32 *fat_entry;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
@ -95,10 +92,12 @@ static s32 exfat_ent_set(struct super_block *sb, u32 loc, u32 content)
return fcache_modify(sb, sec);
}
#define FATENT_FAT32_VALID_MASK (0x0FFFFFFFU)
#define FATENT_FAT32_IGNORE_MASK (0xF0000000U)
static s32 fat32_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
u32 sec, off, _content;
u8 *fat_sector;
u8 *fat_sector;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
@ -108,14 +107,14 @@ static s32 fat32_ent_get(struct super_block *sb, u32 loc, u32 *content)
if (!fat_sector)
return -EIO;
_content = le32_to_cpu(*(__le32*)(&fat_sector[off]));
_content &= 0x0FFFFFFF;
_content = le32_to_cpu(*(__le32 *)(&fat_sector[off]));
_content &= FATENT_FAT32_VALID_MASK;
if (_content >= CLUSTER_32(0x0FFFFFF8)) {
//return 0xFFFFFFFF to simplify code
*content = CLUS_EOF;
return 0;
}
/* remap reserved clusters to simplify code */
if (_content == CLUSTER_32(0x0FFFFFF7U))
_content = CLUS_BAD;
else if (_content >= CLUSTER_32(0x0FFFFFF8U))
_content = CLUS_EOF;
*content = CLUSTER_32(_content);
return 0;
@ -128,7 +127,7 @@ static s32 fat32_ent_set(struct super_block *sb, u32 loc, u32 content)
__le32 *fat_entry;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
content &= 0x0FFFFFFF;
content &= FATENT_FAT32_VALID_MASK;
sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2));
off = (loc << 2) & (u32)(sb->s_blocksize - 1);
@ -138,34 +137,35 @@ static s32 fat32_ent_set(struct super_block *sb, u32 loc, u32 content)
return -EIO;
fat_entry = (__le32 *)&(fat_sector[off]);
content |= (le32_to_cpu(*fat_entry) & 0xF0000000);
content |= (le32_to_cpu(*fat_entry) & FATENT_FAT32_IGNORE_MASK);
*fat_entry = cpu_to_le32(content);
return fcache_modify(sb, sec);
}
#define FATENT_FAT16_VALID_MASK (0x0000FFFFU)
static s32 fat16_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
u32 sec, off, _content;
u8 *fat_sector;
u8 *fat_sector;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1));
off = (loc << 1) & (u32)(sb->s_blocksize - 1);
fat_sector = fcache_getblk(sb, sec);
if(!fat_sector)
if (!fat_sector)
return -EIO;
_content = (u32)le16_to_cpu(*(__le16*)(&fat_sector[off]));
_content &= 0x0000FFFF;
_content = (u32)le16_to_cpu(*(__le16 *)(&fat_sector[off]));
_content &= FATENT_FAT16_VALID_MASK;
/* remap reserved clusters to simplify code */
if (_content == CLUSTER_16(0xFFF7U))
_content = CLUS_BAD;
else if (_content >= CLUSTER_16(0xFFF8U))
_content = CLUS_EOF;
if (_content >= CLUSTER_16(0xFFF8)) {
// return 0x0FFFFFFF to simplify code
*content = CLUS_EOF;
return 0;
}
*content = CLUSTER_32(_content);
return 0;
}
@ -177,7 +177,7 @@ static s32 fat16_ent_set(struct super_block *sb, u32 loc, u32 content)
__le16 *fat_entry;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
content &= 0x0000FFFF;
content &= FATENT_FAT16_VALID_MASK;
sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1));
off = (loc << 1) & (u32)(sb->s_blocksize - 1);
@ -186,16 +186,17 @@ static s32 fat16_ent_set(struct super_block *sb, u32 loc, u32 content)
if (!fat_sector)
return -EIO;
fat_entry = (__le16*)&(fat_sector[off]);
fat_entry = (__le16 *)&(fat_sector[off]);
*fat_entry = cpu_to_le16(content);
return fcache_modify(sb, sec);
}
#define FATENT_FAT12_VALID_MASK (0x00000FFFU)
static s32 fat12_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
u32 sec, off, _content;
u8 *fat_sector;
u8 *fat_sector;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits);
@ -217,15 +218,16 @@ static s32 fat12_ent_get(struct super_block *sb, u32 loc, u32 *content)
_content = get_unaligned_le16(&fat_sector[off]);
}
if (loc & 1) _content >>= 4;
if (loc & 1)
_content >>= 4;
_content &= 0x00000FFF;
_content &= FATENT_FAT12_VALID_MASK;
if (_content >= CLUSTER_16(0x0FF8)) {
/* return 0xFFFFFFFF to simplify code */
*content = CLUS_EOF;
return 0;
}
/* remap reserved clusters to simplify code */
if (_content == CLUSTER_16(0x0FF7U))
_content = CLUS_BAD;
else if (_content >= CLUSTER_16(0x0FF8U))
_content = CLUS_EOF;
*content = CLUSTER_32(_content);
return 0;
@ -237,7 +239,7 @@ static s32 fat12_ent_set(struct super_block *sb, u32 loc, u32 content)
u8 *fat_sector, *fat_entry;
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
content &= 0x00000FFF;
content &= FATENT_FAT12_VALID_MASK;
sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits);
off = (loc + (loc >> 1)) & (u32)(sb->s_blocksize - 1);
@ -262,9 +264,7 @@ static s32 fat12_ent_set(struct super_block *sb, u32 loc, u32 content)
fat_sector[0] = (u8)(content >> 8);
} else {
fat_entry = &(fat_sector[off]);
content |= 0x000F &
get_unaligned_le16(fat_entry);
content |= 0x000F & get_unaligned_le16(fat_entry);
put_unaligned_le16(content, fat_entry);
}
} else { /* even */
@ -282,9 +282,7 @@ static s32 fat12_ent_set(struct super_block *sb, u32 loc, u32 content)
fat_sector[0] = (u8)((fat_sector[0] & 0xF0) | (content >> 8));
} else {
fat_entry = &(fat_sector[off]);
content |= 0xF000 &
get_unaligned_le16(fat_entry);
content |= 0xF000 & get_unaligned_le16(fat_entry);
put_unaligned_le16(content, fat_entry);
}
}
@ -338,12 +336,30 @@ s32 fat_ent_ops_init(struct super_block *sb)
return 0;
}
/*
 * Tell whether @clus holds one of the reserved FAT entry values
 * (free, end-of-chain, or bad cluster) instead of a real cluster number.
 */
static inline bool is_reserved_clus(u32 clus)
{
	return IS_CLUS_FREE(clus) || IS_CLUS_EOF(clus) || IS_CLUS_BAD(clus);
}
/*
 * Tell whether @clus lies inside the volume's addressable cluster range,
 * i.e. CLUS_BASE <= @clus < fsi->num_clusters.
 */
static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus)
{
	return (clus >= CLUS_BASE) && (clus < fsi->num_clusters);
}
s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content)
{
FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
s32 err;
if (loc < CLUS_BASE || fsi->num_clusters <= loc) {
if (!is_valid_clus(fsi, loc)) {
sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", loc);
return -EIO;
}
@ -355,8 +371,7 @@ s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content)
return err;
}
if (*content && !IS_CLUS_EOF(*content) &&
(*content < CLUS_BASE || fsi->num_clusters <= *content)) {
if (!is_reserved_clus(*content) && !is_valid_clus(fsi, *content)) {
sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x) "
"bogus content (0x%08x)", loc, *content);
return -EIO;
@ -368,17 +383,25 @@ s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content)
/*
 * Write @content into FAT entry @loc.
 *
 * Dispatches through the fatent_ops table installed for this volume
 * (selected by fat_ent_ops_init() per FAT variant). Returns the
 * underlying ent_set() result (0 on success, negative errno otherwise).
 */
s32 fat_ent_set(struct super_block *sb, u32 loc, u32 content)
{
	return SDFAT_SB(sb)->fsi.fatent_ops->ent_set(sb, loc, content);
}
s32 fat_ent_get_safe(struct super_block *sb, u32 loc, u32 *content)
{
s32 err = fat_ent_get(sb, loc, content);
if (err)
return err;
if (IS_CLUS_FREE(*content)) {
sdfat_fs_error(sb, "invalid access to free FAT "
sdfat_fs_error(sb, "invalid access to FAT free cluster "
"(entry 0x%08x)", loc);
return -EIO;
}
if (IS_CLUS_BAD(*content)) {
sdfat_fs_error(sb, "invalid access to FAT bad cluster "
"(entry 0x%08x)", loc);
return -EIO;
}

53
misc.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@ -70,7 +68,7 @@ void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_ERR "[SDFAT](%s[%d:%d]):ERR: %pV\n",
pr_err("[SDFAT](%s[%d:%d]):ERR: %pV\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
#ifdef CONFIG_SDFAT_SUPPORT_STLOG
if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
@ -82,11 +80,11 @@ void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
}
if (opts->errors == SDFAT_ERRORS_PANIC) {
panic("[SDFAT](%s[%d:%d]): fs panic from previous error\n",
panic("[SDFAT](%s[%d:%d]): fs panic from previous error\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
} else if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
sb->s_flags |= MS_RDONLY;
printk(KERN_ERR "[SDFAT](%s[%d:%d]): Filesystem has been set "
pr_err("[SDFAT](%s[%d:%d]): Filesystem has been set "
"read-only\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev));
#ifdef CONFIG_SDFAT_SUPPORT_STLOG
ST_LOG("[SDFAT](%s[%d:%d]): Filesystem has been set read-only\n",
@ -97,9 +95,9 @@ void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
EXPORT_SYMBOL(__sdfat_fs_error);
/**
* __sdfat_msg() - print preformated FAT specific messages.
* Every thing what is not sdfat_fs_error() should be __sdfat_msg().
* If 'st' is set to 1, it means that this message should be saved on ST_LOG.
* __sdfat_msg() - print preformatted SDFAT-specific messages.
* All logs except what uses sdfat_fs_error() should be written by __sdfat_msg()
* If 'st' is set, the log is propagated to ST_LOG.
*/
void __sdfat_msg(struct super_block *sb, const char *level, int st, const char *fmt, ...)
{
@ -111,11 +109,12 @@ void __sdfat_msg(struct super_block *sb, const char *level, int st, const char *
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
/* 'level' is the KERN_ facility level prefix */
printk("%s[SDFAT](%s[%d:%d]): %pV\n", level,
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
#ifdef CONFIG_SDFAT_SUPPORT_STLOG
if (st) {
ST_LOG("[SDFAT](%s[%d:%d]): %pV\n",
ST_LOG("[SDFAT](%s[%d:%d]): %pV\n",
sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf);
}
#endif
@ -125,15 +124,16 @@ EXPORT_SYMBOL(__sdfat_msg);
void sdfat_log_version(void)
{
printk(KERN_INFO "[SDFAT] Filesystem version %s\n", SDFAT_VERSION);
pr_info("[SDFAT] Filesystem version %s\n", SDFAT_VERSION);
#ifdef CONFIG_SDFAT_SUPPORT_STLOG
ST_LOG("[SDFAT] Filesystem version %s\n", SDFAT_VERSION);
#endif
}
EXPORT_SYMBOL(sdfat_log_version);
extern struct timezone sys_tz;
/* <linux/time.h> externs sys_tz
* extern struct timezone sys_tz;
*/
#define UNIX_SECS_1980 315532800L
#if BITS_PER_LONG == 64
@ -154,15 +154,15 @@ extern struct timezone sys_tz;
do { \
/* 2100 isn't leap year */ \
if (unlikely(year > NO_LEAP_YEAR_2100)) \
leap_year = ((year + 3) / 4) - 1; \
leap_year = ((year + 3) / 4) - 1; \
else \
leap_year = ((year + 3) / 4); \
} while(0)
leap_year = ((year + 3) / 4); \
} while (0)
/* Linear day numbers of the respective 1sts in non-leap years. */
static time_t accum_days_in_year[] = {
/* Month : 01 02 03 04 05 06 07 08 09 10 11 12 */
0, 0, 31, 59, 90,120,151,181,212,243,273,304,334, 0, 0, 0,
/* Month : N 01 02 03 04 05 06 07 08 09 10 11 12 */
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
@ -179,10 +179,10 @@ void sdfat_time_fat2unix(struct sdfat_sb_info *sbi, struct timespec *ts,
ts->tv_sec = tp->Second + tp->Minute * SECS_PER_MIN
+ tp->Hour * SECS_PER_HOUR
+ (year * 365 + ld + accum_days_in_year[(tp->Month)]
+ (year * 365 + ld + accum_days_in_year[tp->Month]
+ (tp->Day - 1) + DAYS_DELTA_DECADE) * SECS_PER_DAY;
if(!sbi->options.tz_utc)
if (!sbi->options.tz_utc)
ts->tv_sec += sys_tz.tz_minuteswest * SECS_PER_MIN;
ts->tv_nsec = 0;
@ -255,6 +255,7 @@ TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tp)
{
struct timespec ts = CURRENT_TIME_SEC;
DATE_TIME_T dt;
sdfat_time_unix2fat(sbi, &ts, &dt);
tp->year = dt.Year;
@ -264,7 +265,7 @@ TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tp)
tp->min = dt.Minute;
tp->sec = dt.Second;
return(tp);
return tp;
}
u8 calc_chksum_1byte(void *data, s32 len, u8 chksum)
@ -275,7 +276,7 @@ u8 calc_chksum_1byte(void *data, s32 len, u8 chksum)
for (i = 0; i < len; i++, c++)
chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
return(chksum);
return chksum;
}
u16 calc_chksum_2byte(void *data, s32 len, u16 chksum, s32 type)
@ -302,7 +303,8 @@ u32 sdfat_time_current_usec(struct timeval *tv)
#ifdef CONFIG_SDFAT_DBG_CAREFUL
/* Check the consistency of i_size_ondisk (FAT32, or flags 0x01 only) */
void sdfat_debug_check_clusters(struct inode *inode){
void sdfat_debug_check_clusters(struct inode *inode)
{
int num_clusters;
volatile uint32_t tmp_fat_chain[50];
volatile int num_clusters_org, tmp_i = 0;
@ -321,7 +323,8 @@ void sdfat_debug_check_clusters(struct inode *inode){
num_clusters_org = num_clusters;
if (clu.flags == 0x03) return;
if (clu.flags == 0x03)
return;
while (num_clusters > 0) {
/* FAT chain logging */
@ -357,6 +360,7 @@ void __sdfat_dmsg(int level, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
/* fmt already includes KERN_ pacility level */
printk("[%u] %pV", current->pid, &vaf);
va_end(args);
#else
@ -367,6 +371,7 @@ void __sdfat_dmsg(int level, const char *fmt, ...)
return;
va_start(args, fmt);
/* fmt already includes KERN_ pacility level */
vprintk(fmt, args);
va_end(args);
#endif

138
mpage.c
View File

@ -26,9 +26,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -64,7 +62,7 @@
#include <linux/swap.h> /* for mark_page_accessed() */
#include <asm/current.h>
#include <asm/unaligned.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
#include <linux/aio.h>
#endif
@ -80,30 +78,43 @@ static void __mpage_write_end_io(struct bio *bio, int err);
/*************************************************************************
* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
static inline void __sdfat_submit_bio_write2(int flags, struct bio *bio)
{
bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
submit_bio(bio);
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) */
static inline void __sdfat_submit_bio_write2(int flags, struct bio *bio)
{
submit_bio(WRITE | flags, bio);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
static void mpage_write_end_io(struct bio *bio)
{
__mpage_write_end_io(bio, bio->bi_error);
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0) */
static void mpage_write_end_io(struct bio *bio, int err)
{
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
err = 0;
__mpage_write_end_io(bio, err);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) */
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
static inline int bio_get_nr_vecs(struct block_device *bdev)
{
return BIO_MAX_PAGES;
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) */
/* EMPTY */
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
static inline sector_t __sdfat_bio_sector(struct bio *bio)
{
return bio->bi_iter.bi_sector;
@ -123,10 +134,10 @@ static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
{
bio->bi_iter.bi_size = size;
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
static inline sector_t __sdfat_bio_sector(struct bio *bio)
{
return bio->bi_sector;
return bio->bi_sector;
}
static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
@ -136,26 +147,26 @@ static inline void __sdfat_set_bio_sector(struct bio *bio, sector_t sector)
static inline unsigned int __sdfat_bio_size(struct bio *bio)
{
return bio->bi_size;
return bio->bi_size;
}
static inline void __sdfat_set_bio_size(struct bio *bio, unsigned int size)
{
bio->bi_size = size;
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) */
#endif
/* __check_dfr_on() and __dfr_writepage_end_io() functions are copied from
* sdfat.c.
* Each function should be same perfectly
/* __check_dfr_on() and __dfr_writepage_end_io() functions
* are copied from sdfat.c
* Each function should be same perfectly
*/
static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end, const char *fname)
{
#ifdef CONFIG_SDFAT_DFR
#ifdef CONFIG_SDFAT_DFR
struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
if ( (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
fsapi_dfr_check_dfr_on(inode, start, end, 0, fname) )
if ((atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) &&
fsapi_dfr_check_dfr_on(inode, start, end, 0, fname))
return 1;
#endif
return 0;
@ -163,8 +174,9 @@ static inline int __check_dfr_on(struct inode *inode, loff_t start, loff_t end,
static inline int __dfr_writepage_end_io(struct page *page)
{
#ifdef CONFIG_SDFAT_DFR
#ifdef CONFIG_SDFAT_DFR
struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ)
fsapi_dfr_writepage_endio(page);
#endif
@ -172,7 +184,7 @@ static inline int __dfr_writepage_end_io(struct page *page)
}
static inline unsigned int __calc_size_to_align(struct super_block* sb)
static inline unsigned int __calc_size_to_align(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
struct gendisk *disk;
@ -180,7 +192,7 @@ static inline unsigned int __calc_size_to_align(struct super_block* sb)
struct queue_limits *limit;
unsigned int max_sectors;
unsigned int aligned = 0;
disk = bdev->bd_disk;
if (!disk)
goto out;
@ -203,8 +215,8 @@ struct mpage_data {
struct bio *bio;
sector_t last_block_in_bio;
get_block_t *get_block;
unsigned use_writepage;
unsigned size_to_align;
unsigned int use_writepage;
unsigned int size_to_align;
};
/*
@ -235,7 +247,7 @@ static void __mpage_write_end_io(struct bio *bio, int err)
if (page->mapping)
mapping_set_error(page->mapping, err);
}
__dfr_writepage_end_io(page);
end_page_writeback(page);
@ -243,10 +255,10 @@ static void __mpage_write_end_io(struct bio *bio, int err)
bio_put(bio);
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
static struct bio *mpage_bio_submit_write(int flags, struct bio *bio)
{
bio->bi_end_io = mpage_write_end_io;
submit_bio(rw, bio);
__sdfat_submit_bio_write2(flags, bio);
return NULL;
}
@ -271,20 +283,20 @@ mpage_alloc(struct block_device *bdev,
return bio;
}
static int sdfat_mpage_writepage(struct page *page,
static int sdfat_mpage_writepage(struct page *page,
struct writeback_control *wbc, void *data)
{
struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
struct address_space *mapping = page->mapping;
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
const unsigned int blkbits = inode->i_blkbits;
const unsigned int blocks_per_page = PAGE_SIZE >> blkbits;
sector_t last_block;
sector_t block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE];
unsigned page_block;
unsigned first_unmapped = blocks_per_page;
unsigned int page_block;
unsigned int first_unmapped = blocks_per_page;
struct block_device *bdev = NULL;
int boundary = 0;
sector_t boundary_block = 0;
@ -292,7 +304,7 @@ static int sdfat_mpage_writepage(struct page *page,
int length;
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned long end_index = i_size >> PAGE_SHIFT;
int ret = 0;
if (page_has_buffers(page)) {
@ -323,9 +335,10 @@ static int sdfat_mpage_writepage(struct page *page,
/* bh should be mapped if delay is set */
if (buffer_delay(bh)) {
sector_t blk_in_file = (sector_t)(page->index << (PAGE_CACHE_SHIFT - blkbits)) + page_block;
BUG_ON(bh->b_size != (1 << blkbits));
sector_t blk_in_file =
(sector_t)(page->index << (PAGE_SHIFT - blkbits)) + page_block;
BUG_ON(bh->b_size != (1 << blkbits));
if (page->index > end_index) {
MMSG("%s(inode:%p) "
"over end with delayed buffer"
@ -351,7 +364,7 @@ static int sdfat_mpage_writepage(struct page *page,
unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
}
}
if (page_block) {
if (bh->b_blocknr != blocks[page_block-1] + 1) {
MMSG("%s(inode:%p) pblk(%d) "
@ -387,7 +400,7 @@ static int sdfat_mpage_writepage(struct page *page,
* The page has no buffers: map it to disk
*/
BUG_ON(!PageUptodate(page));
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
last_block = (i_size - 1) >> blkbits;
map_bh.b_page = page;
for (page_block = 0; page_block < blocks_per_page; ) {
@ -396,7 +409,7 @@ static int sdfat_mpage_writepage(struct page *page,
map_bh.b_size = 1 << blkbits;
if (mpd->get_block(inode, block_in_file, &map_bh, 1))
goto confused;
if (buffer_new(&map_bh))
unmap_underlying_metadata(map_bh.b_bdev,
map_bh.b_blocknr);
@ -430,7 +443,7 @@ page_is_mapped:
* is zeroed when mapped, and writes to that region are not
* written out to the file."
*/
unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
unsigned int offset = i_size & (PAGE_SIZE - 1);
if (page->index > end_index || !offset) {
MMSG("%s(inode:%p) over end "
@ -439,7 +452,7 @@ page_is_mapped:
(u32)end_index, (u32)offset);
goto confused;
}
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
zero_user_segment(page, offset, PAGE_SIZE);
}
/*
@ -449,22 +462,22 @@ page_is_mapped:
*/
if (bio) {
if (mpd->last_block_in_bio != blocks[0] - 1) {
bio = mpage_bio_submit(WRITE, bio);
bio = mpage_bio_submit_write(0, bio);
} else if (mpd->size_to_align) {
unsigned mask = mpd->size_to_align - 1;
sector_t max_end_block =
unsigned int mask = mpd->size_to_align - 1;
sector_t max_end_block =
(__sdfat_bio_sector(bio) & ~(mask)) + mask;
if ( (__sdfat_bio_size(bio) != (1 << (mask + 1))) &&
(mpd->last_block_in_bio == max_end_block) ) {
if ((__sdfat_bio_size(bio) != (1 << (mask + 1))) &&
(mpd->last_block_in_bio == max_end_block)) {
MMSG("%s(inode:%p) alignment mpage_bio_submit"
"(start:%u, len:%u aligned:%u)\n",
__func__, inode,
(unsigned)__sdfat_bio_sector(bio),
(unsigned)(mpd->last_block_in_bio -
(unsigned int)__sdfat_bio_sector(bio),
(unsigned int)(mpd->last_block_in_bio -
__sdfat_bio_sector(bio) + 1),
(unsigned)mpd->size_to_align);
bio = mpage_bio_submit(WRITE | REQ_NOMERGE, bio);
(unsigned int)mpd->size_to_align);
bio = mpage_bio_submit_write(REQ_NOMERGE, bio);
}
}
}
@ -484,7 +497,7 @@ alloc_new:
*/
length = first_unmapped << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
bio = mpage_bio_submit(WRITE, bio);
bio = mpage_bio_submit_write(0, bio);
goto alloc_new;
}
@ -495,7 +508,7 @@ alloc_new:
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
unsigned buffer_counter = 0;
unsigned int buffer_counter = 0;
do {
if (buffer_counter++ == first_unmapped)
@ -516,28 +529,29 @@ alloc_new:
BUG_ON(PageWriteback(page));
set_page_writeback(page);
/*
/*
* FIXME FOR DEFRAGMENTATION : CODE REVIEW IS REQUIRED
*
* Turn off MAPPED flag in victim's bh if defrag on.
* Another write_begin can starts after get_block for defrag victims
* Another write_begin can starts after get_block for defrag victims
* called.
* In this case, write_begin calls get_block and get original block
* In this case, write_begin calls get_block and get original block
* number and previous defrag will be canceled.
*/
if (unlikely(__check_dfr_on(inode, (loff_t)(page->index << PAGE_SHIFT),
(loff_t)((page->index + 1) << PAGE_SHIFT), __func__))) {
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
do {
clear_buffer_mapped(bh);
bh = bh->b_this_page;
} while (bh != head);
}
unlock_page(page);
if (boundary || (first_unmapped != blocks_per_page)) {
bio = mpage_bio_submit(WRITE, bio);
bio = mpage_bio_submit_write(0, bio);
if (boundary_block) {
write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
@ -550,7 +564,7 @@ alloc_new:
confused:
if (bio)
bio = mpage_bio_submit(WRITE, bio);
bio = mpage_bio_submit_write(0, bio);
if (mpd->use_writepage) {
ret = mapping->a_ops->writepage(page, wbc);
@ -579,14 +593,12 @@ int sdfat_mpage_writepages(struct address_space *mapping,
.use_writepage = 1,
.size_to_align = __calc_size_to_align(mapping->host->i_sb),
};
BUG_ON(!get_block);
blk_start_plug(&plug);
BUG_ON(!get_block);
blk_start_plug(&plug);
ret = write_cache_pages(mapping, wbc, sdfat_mpage_writepage, &mpd);
if (mpd.bio)
mpage_bio_submit(WRITE, mpd.bio);
mpage_bio_submit_write(0, mpd.bio);
blk_finish_plug(&plug);
return ret;
}

89
nls.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -52,7 +50,7 @@ static u16 bad_dos_chars[] = {
/*
* Allow full-width illegal characters :
* "MS windows 7" supports full-width-invalid-name-characters.
* So we should check half-width-invalid-name-characters(ASCII) only
* So we should check half-width-invalid-name-characters(ASCII) only
* for compatibility.
*
* " * / : < > ? \ |
@ -62,10 +60,10 @@ static u16 bad_dos_chars[] = {
static u16 bad_uni_chars[] = {
0x0022, 0x002A, 0x002F, 0x003A,
0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
/*
#if 0 /* allow full-width characters */
0x201C, 0x201D, 0xFF0A, 0xFF0F, 0xFF1A,
0xFF1C, 0xFF1E, 0xFF1F, 0xFF3C, 0xFF5C,
*/
#endif
0
};
@ -101,7 +99,7 @@ u16 *nls_wstrchr(u16 *str, u16 wchar)
s32 nls_cmp_sfn(struct super_block *sb, u8 *a, u8 *b)
{
return(strncmp((void *) a, (void *) b, DOS_NAME_LENGTH));
return strncmp((void *)a, (void *)b, DOS_NAME_LENGTH);
}
s32 nls_cmp_uniname(struct super_block *sb, u16 *a, u16 *b)
@ -109,10 +107,10 @@ s32 nls_cmp_uniname(struct super_block *sb, u16 *a, u16 *b)
s32 i;
for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) {
if (nls_upper(sb, *a) != nls_upper(sb, *b))
return 1;
if (*a == 0x0)
return 0;
if (nls_upper(sb, *a) != nls_upper(sb, *b))
return 1;
if (*a == 0x0)
return 0;
}
return 0;
}
@ -146,7 +144,8 @@ s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T
i = 0;
while (i < DOS_NAME_LENGTH) {
if (i == 8) {
if (last_period == NULL) break;
if (last_period == NULL)
break;
if (uniname <= last_period) {
if (uniname < last_period)
@ -172,9 +171,9 @@ s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T
len = convert_uni_to_ch(nls, *uniname, buf, &lossy);
if (len > 1) {
if ((i >= 8) && ((i+len) > DOS_NAME_LENGTH)) {
if ((i >= 8) && ((i+len) > DOS_NAME_LENGTH))
break;
}
if ((i < 8) && ((i+len) > 8)) {
i = 8;
continue;
@ -182,9 +181,8 @@ s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T
lower = 0xFF;
for (j = 0; j < len; j++, i++) {
for (j = 0; j < len; j++, i++)
*(dosname+i) = *(buf+j);
}
} else { /* len == 1 */
if ((*buf >= 'a') && (*buf <= 'z')) {
*(dosname+i) = *buf - ('a' - 'A');
@ -208,18 +206,18 @@ s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T
uniname++;
}
if (*dosname == 0xE5)
*dosname = 0x05;
if (*uniname != 0x0)
lossy |= NLS_NAME_OVERLEN;
if (*dosname == 0xE5)
*dosname = 0x05;
if (*uniname != 0x0)
lossy |= NLS_NAME_OVERLEN;
if (upper & lower)
p_dosname->name_case = 0xFF;
else
p_dosname->name_case = lower;
if (upper & lower)
p_dosname->name_case = 0xFF;
else
p_dosname->name_case = lower;
if (p_lossy)
*p_lossy = lossy;
if (p_lossy)
*p_lossy = lossy;
return i;
}
@ -266,7 +264,7 @@ s32 nls_sfn_to_uni16s(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T
i = j = 0;
while (j < MAX_NAME_LENGTH) {
if (*(buf+i) == '\0')
if (*(buf+i) == '\0')
break;
i += convert_ch_to_uni(nls, (buf+i), uniname, NULL);
@ -291,7 +289,8 @@ static s32 __nls_utf16s_to_vfsname(struct super_block *sb, UNI_NAME_T *p_uniname
return len;
}
static s32 __nls_vfsname_to_utf16s(struct super_block *sb, const u8 *p_cstring, const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
static s32 __nls_vfsname_to_utf16s(struct super_block *sb, const u8 *p_cstring,
const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
{
s32 i, unilen, lossy = NLS_NAME_NO_LOSSY;
u16 upname[MAX_NAME_LENGTH+1];
@ -307,7 +306,7 @@ static s32 __nls_vfsname_to_utf16s(struct super_block *sb, const u8 *p_cstring,
return unilen;
}
if (unilen > MAX_NAME_LENGTH ) {
if (unilen > MAX_NAME_LENGTH) {
MMSG("%s: failed to vfsname_to_utf16(estr:ENAMETOOLONG) "
"vfsnamelen:%d, unilen:%d>%d",
__func__, len, unilen, MAX_NAME_LENGTH);
@ -316,7 +315,7 @@ static s32 __nls_vfsname_to_utf16s(struct super_block *sb, const u8 *p_cstring,
p_uniname->name_len = (u8)(unilen & 0xFF);
for (i=0; i<unilen; i++) {
for (i = 0; i < unilen; i++) {
if ((*uniname < 0x0020) || nls_wstrchr(bad_uni_chars, *uniname))
lossy |= NLS_NAME_LOSSY;
@ -344,8 +343,8 @@ static s32 __nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *p_uniname
i = 0;
while ((i < MAX_NAME_LENGTH) && (out_len < (buflen-1))) {
if (*uniname == (u16) '\0')
break;
if (*uniname == (u16)'\0')
break;
len = convert_uni_to_ch(nls, *uniname, buf, NULL);
@ -369,7 +368,8 @@ static s32 __nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *p_uniname
return out_len;
}
static s32 __nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring, const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
static s32 __nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
const s32 len, UNI_NAME_T *p_uniname, s32 *p_lossy)
{
s32 i, unilen, lossy = NLS_NAME_NO_LOSSY;
u16 upname[MAX_NAME_LENGTH+1];
@ -379,8 +379,8 @@ static s32 __nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
BUG_ON(!len);
i = unilen = 0;
while ( (unilen < MAX_NAME_LENGTH) && (i < len)) {
i += convert_ch_to_uni(nls, (u8*)(p_cstring+i), uniname, &lossy);
while ((unilen < MAX_NAME_LENGTH) && (i < len)) {
i += convert_ch_to_uni(nls, (u8 *)(p_cstring+i), uniname, &lossy);
if ((*uniname < 0x0020) || nls_wstrchr(bad_uni_chars, *uniname))
lossy |= NLS_NAME_LOSSY;
@ -393,13 +393,13 @@ static s32 __nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
if (*(p_cstring+i) != '\0')
lossy |= NLS_NAME_OVERLEN;
*uniname = (u16)'\0';
p_uniname->name_len = unilen;
p_uniname->name_hash =
calc_chksum_2byte((void *) upname, unilen<<1, 0, CS_DEFAULT);
if (p_lossy)
if (p_lossy)
*p_lossy = lossy;
return unilen;
@ -435,16 +435,16 @@ static s32 convert_ch_to_uni(struct nls_table *nls, u8 *ch, u16 *uni, s32 *lossy
return 1;
}
if ((len = nls->char2uni(ch, MAX_CHARSET_SIZE, uni)) < 0) {
len = nls->char2uni(ch, MAX_CHARSET_SIZE, uni);
if (len < 0) {
/* conversion failed */
DMSG("%s: fail to use nls \n", __func__);
DMSG("%s: fail to use nls\n", __func__);
if (lossy != NULL)
*lossy |= NLS_NAME_LOSSY;
*uni = (u16) '_';
if (!strcmp(nls->charset, "utf8"))
if (!strcmp(nls->charset, "utf8"))
return 1;
else
return 2;
return 2;
}
return len;
@ -461,9 +461,10 @@ static s32 convert_uni_to_ch(struct nls_table *nls, u16 uni, u8 *ch, s32 *lossy)
return 1;
}
if ((len = nls->uni2char(uni, ch, MAX_CHARSET_SIZE)) < 0) {
len = nls->uni2char(uni, ch, MAX_CHARSET_SIZE);
if (len < 0) {
/* conversion failed */
DMSG("%s: fail to use nls \n", __func__);
DMSG("%s: fail to use nls\n", __func__);
if (lossy != NULL)
*lossy |= NLS_NAME_LOSSY;
ch[0] = '_';

935
sdfat.c

File diff suppressed because it is too large Load Diff

148
sdfat.h
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_H
@ -35,28 +33,28 @@
#include "dfr.h"
#endif
/*
/*
* sdfat error flags
*/
#define SDFAT_ERRORS_CONT 1 /* ignore error and continue */
#define SDFAT_ERRORS_PANIC 2 /* panic on error */
#define SDFAT_ERRORS_RO 3 /* remount r/o on error */
#define SDFAT_ERRORS_CONT (1) /* ignore error and continue */
#define SDFAT_ERRORS_PANIC (2) /* panic on error */
#define SDFAT_ERRORS_RO (3) /* remount r/o on error */
/*
* sdfat allocator flags
*/
#define SDFAT_ALLOC_DELAY 1 /* Delayed allocation */
#define SDFAT_ALLOC_SMART 2 /* Smart allocation */
#define SDFAT_ALLOC_DELAY (1) /* Delayed allocation */
#define SDFAT_ALLOC_SMART (2) /* Smart allocation */
/*
* sdfat allocator destination for smart allocation
*/
#define ALLOC_NOWHERE 0
#define ALLOC_COLD 1
#define ALLOC_HOT 16
#define ALLOC_COLD_ALIGNED 1
#define ALLOC_COLD_PACKING 2
#define ALLOC_COLD_SEQ 4
#define ALLOC_NOWHERE (0)
#define ALLOC_COLD (1)
#define ALLOC_HOT (16)
#define ALLOC_COLD_ALIGNED (1)
#define ALLOC_COLD_PACKING (2)
#define ALLOC_COLD_SEQ (4)
/*
* sdfat nls lossy flag
@ -71,22 +69,24 @@
#define CLUSTER_16(x) ((u16)((x) & 0xFFFFU))
#define CLUSTER_32(x) ((u32)((x) & 0xFFFFFFFFU))
#define CLUS_EOF CLUSTER_32(~0)
#define CLUS_BAD (0xFFFFFFF7U)
#define CLUS_FREE (0)
#define CLUS_BASE (2)
#define IS_CLUS_EOF(x) (x == CLUS_EOF)
#define IS_CLUS_FREE(x) (x == CLUS_FREE)
#define IS_LAST_SECT_IN_CLUS(fsi, sec) \
( (((sec) - (fsi)->data_start_sector + 1) \
& ((1 << (fsi)->sect_per_clus_bits) -1)) == 0 )
#define IS_CLUS_EOF(x) ((x) == CLUS_EOF)
#define IS_CLUS_BAD(x) ((x) == CLUS_BAD)
#define IS_CLUS_FREE(x) ((x) == CLUS_FREE)
#define IS_LAST_SECT_IN_CLUS(fsi, sec) \
((((sec) - (fsi)->data_start_sector + 1) \
& ((1 << (fsi)->sect_per_clus_bits) - 1)) == 0)
#define CLUS_TO_SECT(fsi, x) \
( (((x) - CLUS_BASE) << (fsi)->sect_per_clus_bits) + (fsi)->data_start_sector )
((((x) - CLUS_BASE) << (fsi)->sect_per_clus_bits) + (fsi)->data_start_sector)
#define SECT_TO_CLUS(fsi, sec) \
((((sec) - (fsi)->data_start_sector) >> (fsi)->sect_per_clus_bits) + CLUS_BASE)
((((sec) - (fsi)->data_start_sector) >> (fsi)->sect_per_clus_bits) + CLUS_BASE)
/* variables defined at sdfat.c */
extern const char* FS_TYPE_STR[];
extern const char *FS_TYPE_STR[];
enum {
FS_TYPE_AUTO,
@ -99,12 +99,12 @@ enum {
* sdfat mount in-memory data
*/
struct sdfat_mount_options {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
uid_t fs_uid;
gid_t fs_gid;
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
kuid_t fs_uid;
kgid_t fs_gid;
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
uid_t fs_uid;
gid_t fs_gid;
#endif
unsigned short fs_fmask;
unsigned short fs_dmask;
@ -142,7 +142,7 @@ struct sdfat_sb_info {
struct mutex s_vlock; /* volume lock */
int use_vmalloc;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,00)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
int s_dirt;
struct mutex s_lock; /* superblock lock */
int write_super_queued; /* Write_super work is pending? */
@ -195,12 +195,12 @@ struct sdfat_sb_info {
struct sdfat_inode_info {
FILE_ID_T fid;
char *target;
/* NOTE: i_size_ondisk is 64bits, so must hold ->i_mutex to access */
/* NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access */
loff_t i_size_ondisk; /* physically allocated size */
loff_t i_size_aligned; /* block-aligned i_size (used in cont_write_begin) */
loff_t i_pos; /* on-disk position of directory entry or 0 */
struct hlist_node i_hash_fat; /* hash by i_location */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
struct rw_semaphore truncate_lock; /* protect bmap against truncate */
#endif
#ifdef CONFIG_SDFAT_DFR
@ -218,7 +218,7 @@ static inline const char *sdfat_get_vol_type_str(unsigned int type)
{
if (type == EXFAT)
return "exfat";
else if (type == FAT32)
else if (type == FAT32)
return "vfat:32";
else if (type == FAT16)
return "vfat:16";
@ -230,10 +230,10 @@ static inline const char *sdfat_get_vol_type_str(unsigned int type)
static inline struct sdfat_sb_info *SDFAT_SB(struct super_block *sb)
{
return (struct sdfat_sb_info*)sb->s_fs_info;
return (struct sdfat_sb_info *)sb->s_fs_info;
}
static inline struct sdfat_inode_info *SDFAT_I(struct inode *inode)
static inline struct sdfat_inode_info *SDFAT_I(struct inode *inode)
{
return container_of(inode, struct sdfat_inode_info, vfs_inode);
}
@ -279,6 +279,7 @@ static inline mode_t sdfat_make_mode(struct sdfat_sb_info *sbi,
static inline u32 sdfat_make_attr(struct inode *inode)
{
u32 attrs = SDFAT_I(inode)->fid.attr;
if (S_ISDIR(inode->i_mode))
attrs |= ATTR_SUBDIR;
if (sdfat_mode_can_hold_ro(inode) && !(inode->i_mode & S_IWUGO))
@ -294,6 +295,31 @@ static inline void sdfat_save_attr(struct inode *inode, u32 attr)
SDFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY);
}
/* sdfat/statistics.c */
/* bigdata function */
#ifdef CONFIG_SDFAT_STATISTICS
extern int sdfat_statistics_init(struct kset *sdfat_kset);
extern void sdfat_statistics_uninit(void);
extern void sdfat_statistics_set_mnt(FS_INFO_T *fsi);
extern void sdfat_statistics_set_mkdir(u8 flags);
extern void sdfat_statistics_set_create(u8 flags);
extern void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create);
extern void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu);
extern void sdfat_statistics_set_vol_size(struct super_block *sb);
#else
static inline int sdfat_statistics_init(struct kset *sdfat_kset)
{
return 0;
}
static inline void sdfat_statistics_uninit(void) {};
static inline void sdfat_statistics_set_mnt(FS_INFO_T *fsi) {};
static inline void sdfat_statistics_set_mkdir(u8 flags) {};
static inline void sdfat_statistics_set_create(u8 flags) {};
static inline void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create) {};
static inline void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu) {};
static inline void sdfat_statistics_set_vol_size(struct super_block *sb) {};
#endif
/* sdfat/nls.c */
/* NLS management function */
s32 nls_cmp_sfn(struct super_block *sb, u8 *a, u8 *b);
@ -301,37 +327,41 @@ s32 nls_cmp_uniname(struct super_block *sb, u16 *a, u16 *b);
s32 nls_uni16s_to_sfn(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname, s32 *p_lossy);
s32 nls_sfn_to_uni16s(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T *p_uniname);
s32 nls_uni16s_to_vfsname(struct super_block *sb, UNI_NAME_T *uniname, u8 *p_cstring, s32 len);
s32 nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring, const s32 len, UNI_NAME_T *uniname, s32 *p_lossy);
s32 nls_vfsname_to_uni16s(struct super_block *sb, const u8 *p_cstring,
const s32 len, UNI_NAME_T *uniname, s32 *p_lossy);
/* sdfat/mpage.c */
#ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE
int sdfat_mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t *get_block);
struct writeback_control *wbc, get_block_t *get_block);
#endif
/* sdfat/xattr.c */
#ifdef CONFIG_SDFAT_VIRTUAL_XATTR
extern int sdfat_setxattr(struct dentry*dentry, const char *name, const void *value, size_t size, int flags);
void setup_sdfat_xattr_handler(struct super_block *sb);
extern int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
extern ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, size_t size);
extern ssize_t sdfat_listxattr(struct dentry *dentry, char *list, size_t size);
extern int sdfat_removexattr(struct dentry *dentry, const char *name);
#else
static inline void setup_sdfat_xattr_handler(struct super_block *sb) {};
#endif
/* sdfat/misc.c */
extern void
__sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
__attribute__ ((format (printf, 3, 4))) __cold;
__printf(3, 4) __cold;
#define sdfat_fs_error(sb, fmt, args...) \
__sdfat_fs_error(sb, 1, fmt , ## args)
__sdfat_fs_error(sb, 1, fmt, ## args)
#define sdfat_fs_error_ratelimit(sb, fmt, args...) \
__sdfat_fs_error(sb, __ratelimit(&SDFAT_SB(sb)->ratelimit), fmt, ## args)
extern void
extern void
__sdfat_msg(struct super_block *sb, const char *lv, int st, const char *fmt, ...)
__attribute__ ((format (printf, 4, 5))) __cold;
__printf(4, 5) __cold;
#define sdfat_msg(sb, lv, fmt, args...) \
__sdfat_msg(sb, lv, 0, fmt , ## args)
__sdfat_msg(sb, lv, 0, fmt, ## args)
#define sdfat_log_msg(sb, lv, fmt, args...) \
__sdfat_msg(sb, lv, 1, fmt , ## args)
__sdfat_msg(sb, lv, 1, fmt, ## args)
extern void sdfat_log_version(void);
extern void sdfat_time_fat2unix(struct sdfat_sb_info *sbi, struct timespec *ts,
DATE_TIME_T *tp);
@ -367,20 +397,16 @@ void sdfat_debug_check_clusters(struct inode *inode);
#endif /* CONFIG_SDFAT_DEBUG */
#ifdef CONFIG_SDFAT_TRACE_ELAPSED_TIME
u32 sdfat_time_current_usec(struct timeval* tv);
u32 sdfat_time_current_usec(struct timeval *tv);
extern struct timeval __t1;
extern struct timeval __t2;
#define TIME_GET(tv) sdfat_time_current_usec(tv)
#define TIME_START(s) do {sdfat_time_current_usec(s); } while (0)
#define TIME_END(e) do {sdfat_time_current_usec(e); } while (0)
#define TIME_GET(tv) sdfat_time_current_usec(tv)
#define TIME_START(s) sdfat_time_current_usec(s)
#define TIME_END(e) sdfat_time_current_usec(e)
#define TIME_ELAPSED(s, e) ((u32)(((e)->tv_sec - (s)->tv_sec) * 1000000 + \
((e)->tv_usec - (s)->tv_usec)))
#define PRINT_TIME(n) \
do { \
printk("[SDFAT] Elapsed time %d = %d (usec)\n", \
n, (__t2 - __t1)); \
} while(0)
#define PRINT_TIME(n) pr_info("[SDFAT] Elapsed time %d = %d (usec)\n", n, (__t2 - __t1))
#else /* CONFIG_SDFAT_TRACE_ELAPSED_TIME */
#define TIME_GET(tv) (0)
#define TIME_START(s)
@ -403,11 +429,12 @@ extern struct timeval __t2;
#define __S(x) #x
#define _S(x) __S(x)
extern void __sdfat_dmsg(int level, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3))) __cold;
extern void __sdfat_dmsg(int level, const char *fmt, ...) __printf(2, 3) __cold;
#define SDFAT_EMSG_T(level, ...) __sdfat_dmsg(level, KERN_ERR "[" SDFAT_TAG_NAME "] [" _S(__FILE__) "(" _S(__LINE__) ")] " __VA_ARGS__)
#define SDFAT_DMSG_T(level, ...) __sdfat_dmsg(level, KERN_INFO "[" SDFAT_TAG_NAME "] " __VA_ARGS__)
#define SDFAT_EMSG_T(level, ...) \
__sdfat_dmsg(level, KERN_ERR "[" SDFAT_TAG_NAME "] [" _S(__FILE__) "(" _S(__LINE__) ")] " __VA_ARGS__)
#define SDFAT_DMSG_T(level, ...) \
__sdfat_dmsg(level, KERN_INFO "[" SDFAT_TAG_NAME "] " __VA_ARGS__)
#define SDFAT_EMSG(...) SDFAT_EMSG_T(SDFAT_MSG_LV_ERR, __VA_ARGS__)
#define SDFAT_IMSG(...) SDFAT_DMSG_T(SDFAT_MSG_LV_INFO, __VA_ARGS__)
@ -469,11 +496,12 @@ extern void __sdfat_dmsg(int level, const char *fmt, ...)
#endif /* CONFIG_SDFAT_DBG_MSG */
#define ASSERT(expr) \
if (!(expr)) { \
printk(KERN_ERR "Assertion failed! %s\n", #expr); \
BUG_ON(1); \
}
#define ASSERT(expr) { \
if (!(expr)) { \
pr_err("Assertion failed! %s\n", #expr); \
BUG_ON(1); \
} \
}
#endif /* !_SDFAT_H */

View File

@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SDFAT_FS_H
@ -59,7 +58,7 @@
/* NOTE :
* The maximum length of input or output is limited to 256 including NULL,
* But we allocate 4 extra bytes for utf8 translation reside in last position,
* because utf8 can uses memory upto 6 bytes per one charactor.
* because utf8 can uses memory upto 6 bytes per one character.
* Therefore, MAX_CHARSET_SIZE supports upto 6 bytes for utf8
*/
#define MAX_UNINAME_BUF_SIZE (((MAX_NAME_LENGTH+1)*2)+4)
@ -73,6 +72,7 @@
#define DENTRY_SIZE_BITS 5
#define MAX_FAT_DENTRIES 65536 /* FAT allows 65536 directory entries */
#define MAX_EXFAT_DENTRIES 8388608 /* exFAT allows 8388608(256MB) directory entries */
/* PBR entries */
#define PBR_SIGNATURE 0xAA55
@ -100,7 +100,7 @@
#define MSDOS_UNUSED 0x00 /* end of directory */
#define EXFAT_UNUSED 0x00 /* end of directory */
#define IS_EXFAT_DELETED(x) ((x)<0x80) /* deleted file (0x01~0x7F) */
#define IS_EXFAT_DELETED(x) ((x) < 0x80) /* deleted file (0x01~0x7F) */
#define EXFAT_INVAL 0x80 /* invalid value */
#define EXFAT_BITMAP 0x81 /* allocation bitmap */
#define EXFAT_UPCASE 0x82 /* upcase table */
@ -143,17 +143,17 @@
#define CS_PBR_SECTOR 1
#define CS_DEFAULT 2
/*
* ioctl command
/*
* ioctl command
*/
#define SDFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
#define SDFAT_IOCTL_DFR_INFO _IOC(_IOC_NONE, 'E', 0x13, sizeof(u32))
#define SDFAT_IOCTL_DFR_TRAV _IOC(_IOC_NONE, 'E', 0x14, sizeof(u32))
#define SDFAT_IOCTL_DFR_TRAV _IOC(_IOC_NONE, 'E', 0x14, sizeof(u32))
#define SDFAT_IOCTL_DFR_REQ _IOC(_IOC_NONE, 'E', 0x15, sizeof(u32))
#define SDFAT_IOCTL_DFR_SPO_FLAG _IOC(_IOC_NONE, 'E', 0x16, sizeof(u32))
#define SDFAT_IOCTL_PANIC _IOC(_IOC_NONE, 'E', 0x17, sizeof(u32))
/*
/*
* ioctl command for debugging
*/
@ -162,7 +162,7 @@
* - file systems typically #0~0x1F
* - embedded terminal devices #128~
* - exts for debugging purpose #99
* number 100 and 101 is availble now but has possible conflicts
* number 100 and 101 is available now but has possible conflicts
*
* NOTE : This is available only If CONFIG_SDFAT_DVBG_IOCTL is enabled.
*
@ -184,15 +184,15 @@ typedef struct {
__u8 sect_size[2]; /* unaligned */
__u8 sect_per_clus;
__le16 num_reserved; /* . */
__le16 num_reserved; /* . */
__u8 num_fats;
__u8 num_root_entries[2]; /* unaligned */
__u8 num_root_entries[2]; /* unaligned */
__u8 num_sectors[2]; /* unaligned */
__u8 media_type;
__le16 num_fat_sectors;
__le16 sectors_in_track;
__le16 num_heads;
__le32 num_hid_sectors; /* . */
__le32 num_hid_sectors; /* . */
__le32 num_huge_sectors;
__u8 phy_drv_no;
@ -213,7 +213,7 @@ typedef struct {
__u8 sect_per_clus;
__le16 num_reserved;
__u8 num_fats;
__u8 num_root_entries[2]; /* unaligned */
__u8 num_root_entries[2]; /* unaligned */
__u8 num_sectors[2]; /* unaligned */
__u8 media_type;
__le16 num_fat_sectors; /* zero */

262
statistics.c Normal file
View File

@ -0,0 +1,262 @@
#include "sdfat.h"
/* Cluster-size histogram widths: one bucket per power-of-two cluster size */
#define SDFAT_VF_CLUS_MAX 7 /* 512 Byte ~ 32 KByte */
#define SDFAT_EF_CLUS_MAX 17 /* 512 Byte ~ 32 MByte */

/* Indices into mnt_cnt[]: which FAT variant was mounted */
enum {
	SDFAT_MNT_FAT12,
	SDFAT_MNT_FAT16,
	SDFAT_MNT_FAT32,
	SDFAT_MNT_EXFAT,
	SDFAT_MNT_MAX
};

/* Indices into nofat_op[]: operations observed on no-fat-chain entries */
enum {
	SDFAT_OP_EXFAT_MNT,
	SDFAT_OP_MKDIR,
	SDFAT_OP_CREATE,
	SDFAT_OP_READ,
	SDFAT_OP_WRITE,
	SDFAT_OP_TRUNC,
	SDFAT_OP_MAX
};

/* Indices into vol_size[]: volume-size histogram buckets (upper bounds) */
enum {
	SDFAT_VOL_4G,
	SDFAT_VOL_8G,
	SDFAT_VOL_16G,
	SDFAT_VOL_32G,
	SDFAT_VOL_64G,
	SDFAT_VOL_128G,
	SDFAT_VOL_256G,
	SDFAT_VOL_512G,
	SDFAT_VOL_XTB,
	SDFAT_VOL_MAX
};

/*
 * Counters exported read-only through sysfs (see attr_group_statistics and
 * sdfat_statistics_init below). The nofat_op[] slots are set to 1 as flags,
 * the other arrays are incremented per mount.
 */
static struct sdfat_statistics {
	u32 clus_vfat[SDFAT_VF_CLUS_MAX];	/* vfat mounts per cluster size */
	u32 clus_exfat[SDFAT_EF_CLUS_MAX];	/* exfat mounts per cluster size */
	u32 mnt_cnt[SDFAT_MNT_MAX];		/* mounts per FS variant */
	u32 nofat_op[SDFAT_OP_MAX];		/* no-fat-chain op flags (0/1) */
	u32 vol_size[SDFAT_VOL_MAX];		/* mounts per volume-size bucket */
} statistics;

/* sysfs "statistics" directory; created in sdfat_statistics_init() */
static struct kset *sdfat_statistics_kset;
static ssize_t vfat_cl_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "VCL_512B_I:%u,VCL_1K_I:%u,VCL_2K_I:%u,"
"VCL_4K_I:%u,VCL_8K_I:%u,VCL_16K_I:%u,VCL_32K_I:%u\n",
statistics.clus_vfat[0], statistics.clus_vfat[1],
statistics.clus_vfat[2], statistics.clus_vfat[3],
statistics.clus_vfat[4], statistics.clus_vfat[5],
statistics.clus_vfat[6]);
}
static ssize_t exfat_cl_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "ECL_512B_I:%u,ECL_1K_I:%u,ECL_2K_I:%u,"
"ECL_4K_I:%u,ECL_8K_I:%u,ECL_16K_I:%u,ECL_32K_I:%u,ECL_64K_I:%u,"
"ECL_128K_I:%u,ECL_256K_I:%u,ECL_512K_I:%u,ECL_1M_I:%u,"
"ECL_2M_I:%u,ECL_4M_I:%u,ECL_8M_I:%u,ECL_16M_I:%u,ECL_32M_I:%u\n",
statistics.clus_exfat[0], statistics.clus_exfat[1],
statistics.clus_exfat[2], statistics.clus_exfat[3],
statistics.clus_exfat[4], statistics.clus_exfat[5],
statistics.clus_exfat[6], statistics.clus_exfat[7],
statistics.clus_exfat[8], statistics.clus_exfat[9],
statistics.clus_exfat[10], statistics.clus_exfat[11],
statistics.clus_exfat[12], statistics.clus_exfat[13],
statistics.clus_exfat[14], statistics.clus_exfat[15],
statistics.clus_exfat[16]);
}
static ssize_t mount_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "FAT12_MNT_I:%u,FAT16_MNT_I:%u,FAT32_MNT_I:%u,"
"EXFAT_MNT_I:%u\n",
statistics.mnt_cnt[SDFAT_MNT_FAT12],
statistics.mnt_cnt[SDFAT_MNT_FAT16],
statistics.mnt_cnt[SDFAT_MNT_FAT32],
statistics.mnt_cnt[SDFAT_MNT_EXFAT]);
}
static ssize_t nofat_op_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "NOFAT_MOUNT_I:%u,NOFAT_MKDIR_I:%u,NOFAT_CREATE_I:%u,"
"NOFAT_READ_I:%u,NOFAT_WRITE_I:%u,NOFAT_TRUNC_I:%u\n",
statistics.nofat_op[SDFAT_OP_EXFAT_MNT],
statistics.nofat_op[SDFAT_OP_MKDIR],
statistics.nofat_op[SDFAT_OP_CREATE],
statistics.nofat_op[SDFAT_OP_READ],
statistics.nofat_op[SDFAT_OP_WRITE],
statistics.nofat_op[SDFAT_OP_TRUNC]);
}
static ssize_t vol_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "VOL_4G_I:%u,VOL_8G_I:%u,VOL_16G_I:%u,"
"VOL_32G_I:%u,VOL_64G_I:%u,VOL_128G_I:%u,VOL_256G_I:%u,"
"VOL_512G_I:%u,VOL_XTB_I:%u\n",
statistics.vol_size[SDFAT_VOL_4G],
statistics.vol_size[SDFAT_VOL_8G],
statistics.vol_size[SDFAT_VOL_16G],
statistics.vol_size[SDFAT_VOL_32G],
statistics.vol_size[SDFAT_VOL_64G],
statistics.vol_size[SDFAT_VOL_128G],
statistics.vol_size[SDFAT_VOL_256G],
statistics.vol_size[SDFAT_VOL_512G],
statistics.vol_size[SDFAT_VOL_XTB]);
}
/* Read-only sysfs attributes; __ATTR_RO binds <name>_show as the handler */
static struct kobj_attribute vfat_cl_attr = __ATTR_RO(vfat_cl);
static struct kobj_attribute exfat_cl_attr = __ATTR_RO(exfat_cl);
static struct kobj_attribute mount_attr = __ATTR_RO(mount);
static struct kobj_attribute nofat_op_attr = __ATTR_RO(nofat_op);
static struct kobj_attribute vol_size_attr = __ATTR_RO(vol_size);

/* NULL-terminated list consumed by sysfs_create_group() */
static struct attribute *attributes_statistics[] = {
	&vfat_cl_attr.attr,
	&exfat_cl_attr.attr,
	&mount_attr.attr,
	&nofat_op_attr.attr,
	&vol_size_attr.attr,
	NULL,
};

/* Attribute group registered under the "statistics" kset in init */
static struct attribute_group attr_group_statistics = {
	.attrs = attributes_statistics,
};
/*
 * Create the "statistics" sysfs directory under the given sdfat kset and
 * register the counter attributes in it.
 *
 * Returns 0 on success, -ENOMEM if the kset cannot be created, or the
 * sysfs_create_group() error code (kset is unregistered again on that path).
 */
int sdfat_statistics_init(struct kset *sdfat_kset)
{
	int ret;

	sdfat_statistics_kset = kset_create_and_add("statistics", NULL, &sdfat_kset->kobj);
	if (!sdfat_statistics_kset) {
		pr_err("[SDFAT] failed to create sdfat statistics kobj\n");
		return -ENOMEM;
	}

	ret = sysfs_create_group(&sdfat_statistics_kset->kobj, &attr_group_statistics);
	if (!ret)
		return 0;

	/* attribute registration failed: roll back the kset */
	pr_err("[SDFAT] failed to create sdfat statistics attributes\n");
	kset_unregister(sdfat_statistics_kset);
	sdfat_statistics_kset = NULL;
	return ret;
}
void sdfat_statistics_uninit(void)
{
if (sdfat_statistics_kset) {
sysfs_remove_group(&sdfat_statistics_kset->kobj, &attr_group_statistics);
kset_unregister(sdfat_statistics_kset);
sdfat_statistics_kset = NULL;
}
memset(&statistics, 0, sizeof(struct sdfat_statistics));
}
/*
 * Record a mount: bump the per-variant mount counter and the cluster-size
 * bucket for the mounted volume. Cluster sizes beyond the histogram range
 * are clamped into the last bucket. An exFAT mount also latches the
 * SDFAT_OP_EXFAT_MNT flag.
 */
void sdfat_statistics_set_mnt(FS_INFO_T *fsi)
{
	u32 bits = fsi->sect_per_clus_bits;

	switch (fsi->vol_type) {
	case EXFAT:
		statistics.mnt_cnt[SDFAT_MNT_EXFAT]++;
		statistics.nofat_op[SDFAT_OP_EXFAT_MNT] = 1;
		if (bits >= SDFAT_EF_CLUS_MAX)
			bits = SDFAT_EF_CLUS_MAX - 1;
		statistics.clus_exfat[bits]++;
		return;
	case FAT32:
		statistics.mnt_cnt[SDFAT_MNT_FAT32]++;
		break;
	case FAT16:
		statistics.mnt_cnt[SDFAT_MNT_FAT16]++;
		break;
	case FAT12:
		statistics.mnt_cnt[SDFAT_MNT_FAT12]++;
		break;
	default:
		/* unknown variant: no mount counter, cluster bucket only */
		break;
	}

	if (bits >= SDFAT_VF_CLUS_MAX)
		bits = SDFAT_VF_CLUS_MAX - 1;
	statistics.clus_vfat[bits]++;
}
/* Latch the no-fat-chain mkdir flag (0x03 == no-fat-chain entry). */
void sdfat_statistics_set_mkdir(u8 flags)
{
	if (flags == 0x03)
		statistics.nofat_op[SDFAT_OP_MKDIR] = 1;
}
/* Latch the no-fat-chain create flag (0x03 == no-fat-chain entry). */
void sdfat_statistics_set_create(u8 flags)
{
	if (flags == 0x03)
		statistics.nofat_op[SDFAT_OP_CREATE] = 1;
}
/* flags : file or dir flags, 0x03 means no fat-chain.
 * clu_offset : file or dir logical cluster offset
 * create : BMAP_ADD_CLUSTER or not
 *
 * A file or dir with BMAP_ADD_CLUSTER set counts as a no-fat-chain write
 * when it has the 0x03 flag and two or more clusters; without
 * BMAP_ADD_CLUSTER it counts as a no-fat-chain read under the same
 * condition.
 */
/* Latch a no-fat-chain read or write: 0x03 entry spanning 2+ clusters. */
void sdfat_statistics_set_rw(u8 flags, u32 clu_offset, s32 create)
{
	if (flags != 0x03 || clu_offset <= 1)
		return;

	/* create (BMAP_ADD_CLUSTER) distinguishes write from read */
	statistics.nofat_op[create ? SDFAT_OP_WRITE : SDFAT_OP_READ] = 1;
}
/* flags : file or dir flags, 0x03 means no fat-chain.
 * clu : cluster chain
 *
 * Set no fat-chain trunc when a file or dir has the 0x03 flag
 * and two or more clusters.
 */
/* Latch a no-fat-chain truncate: 0x03 entry whose chain spans 2+ clusters. */
void sdfat_statistics_set_trunc(u8 flags, CHAIN_T *clu)
{
	if (flags != 0x03)
		return;
	if (clu->size > 1)
		statistics.nofat_op[SDFAT_OP_TRUNC] = 1;
}
/*
 * Classify the mounted volume's byte size into a histogram bucket.
 * Bucket k covers sizes up to 2^(32+k) bytes (SDFAT_VOL_4G == 0 is <= 4GB);
 * anything above 512GB lands in SDFAT_VOL_XTB. Relies on the bucket enum
 * values being consecutive starting from 0.
 */
void sdfat_statistics_set_vol_size(struct super_block *sb)
{
	int bucket;
	u64 vol_size;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	vol_size = (u64)fsi->num_sectors << sb->s_blocksize_bits;

	for (bucket = SDFAT_VOL_4G; bucket < SDFAT_VOL_XTB; bucket++) {
		if (vol_size <= ((u64)1 << (32 + bucket)))
			break;
	}
	statistics.vol_size[bucket]++;
}

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _UPCASE_H
@ -30,11 +28,11 @@
/*
 * Column index into the two-level upcase table: the high bits of the
 * UTF-16 code unit above LOW_INDEX_BIT select the column.
 * (The duplicated return statement — a leftover merge/diff artifact that
 * left dead code — is removed.)
 */
static inline u16 get_col_index(u16 i)
{
	return i >> LOW_INDEX_BIT;
}
/*
 * Row index into the two-level upcase table: the low bits of the UTF-16
 * code unit (those not covered by HIGH_INDEX_MASK) select the row.
 * (The duplicated return statement — a leftover merge/diff artifact that
 * left dead code — is removed.)
 */
static inline u16 get_row_index(u16 i)
{
	return i & ~HIGH_INDEX_MASK;
}

View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -24,4 +22,4 @@
/* PURPOSE : sdFAT File Manager */
/* */
/************************************************************************/
#define SDFAT_VERSION "1.3.24"
#define SDFAT_VERSION "1.4.16"

73
xattr.c
View File

@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/************************************************************************/
@ -48,7 +46,16 @@ static int can_support(const char *name)
return 0;
}
int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
/*
 * List extended attributes: sdFAT stores none on disk, so the listing is
 * always empty (returns 0, writes nothing to @list).
 */
ssize_t sdfat_listxattr(struct dentry *dentry, char *list, size_t size)
{
	return 0;
}
/*************************************************************************
* INNER FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
static int __sdfat_xattr_check_support(const char *name)
{
if (can_support(name))
return -EOPNOTSUPP;
@ -56,7 +63,7 @@ int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, s
return 0;
}
ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
ssize_t __sdfat_getxattr(const char *name, void *value, size_t size)
{
if (can_support(name))
return -EOPNOTSUPP;
@ -67,17 +74,59 @@ ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, siz
return strlen(default_xattr);
}
ssize_t sdfat_listxattr(struct dentry *dentry, char *list, size_t size)
/*************************************************************************
* FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY
*************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
/*
 * xattr_handler .get callback (kernel >= 4.9): delegates to the
 * version-independent __sdfat_getxattr() helper, which rejects
 * unsupported names with -EOPNOTSUPP.
 */
static int sdfat_xattr_get(const struct xattr_handler *handler,
		struct dentry *dentry, struct inode *inode,
		const char *name, void *buffer, size_t size)
{
	return __sdfat_getxattr(name, buffer, size);
}
/*
 * xattr_handler .set callback (kernel >= 4.9): nothing is persisted;
 * only the name support check runs (0 on success, -EOPNOTSUPP otherwise).
 * @value and @size are intentionally ignored.
 */
static int sdfat_xattr_set(const struct xattr_handler *handler,
		struct dentry *dentry, struct inode *inode,
		const char *name, const void *value, size_t size,
		int flags)
{
	return __sdfat_xattr_check_support(name);
}
/* Single catch-all handler: the empty prefix matches every xattr name */
const struct xattr_handler sdfat_xattr_handler = {
	.prefix = "", /* match anything */
	.get = sdfat_xattr_get,
	.set = sdfat_xattr_set,
};

/* NULL-terminated handler table installed on sb->s_xattr */
const struct xattr_handler *sdfat_xattr_handlers[] = {
	&sdfat_xattr_handler,
	NULL
};
/* Install the sdFAT xattr handler table on the superblock (kernel >= 4.9). */
void setup_sdfat_xattr_handler(struct super_block *sb)
{
	sb->s_xattr = sdfat_xattr_handlers;
}
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
/*
 * Legacy (< 4.9) inode-op setxattr: nothing is stored; only the name
 * support check runs (0 on success, -EOPNOTSUPP for unsupported names).
 */
int sdfat_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
{
	return __sdfat_xattr_check_support(name);
}
/*
 * Legacy (< 4.9) inode-op getxattr: delegates to the version-independent
 * __sdfat_getxattr() helper.
 */
ssize_t sdfat_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
{
	return __sdfat_getxattr(name, value, size);
}
/*
 * Legacy (< 4.9) inode-op removexattr: nothing is stored, so "removing" a
 * supported name is a no-op success; unsupported names get -EOPNOTSUPP.
 * The stale inline copy of the check (which left an unreachable duplicate
 * return) is dropped in favor of the shared helper, matching
 * sdfat_setxattr().
 */
int sdfat_removexattr(struct dentry *dentry, const char *name)
{
	return __sdfat_xattr_check_support(name);
}
/*
 * Legacy (< 4.9) stub: xattrs are routed through the inode operations
 * (sdfat_setxattr/sdfat_getxattr/...), so no sb->s_xattr table is needed.
 */
void setup_sdfat_xattr_handler(struct super_block *sb)
{
	/* DO NOTHING */
}
#endif