/*
* Locking.
*
* i_mutex
* PG_locked
* ->coh_page_guard
* ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
*/
#define DEBUG_SUBSYSTEM S_CLASS
#include <linux/libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <linux/list.h>
#include <linux/libcfs/libcfs_hash.h> /* for cfs_hash stuff */
#include <cl_object.h>
#include "cl_internal.h"
/* Slab cache for per-thread cl_env structures; its users are not visible
 * in this chunk — presumably allocated/freed by the cl_env_* code further
 * down the file (TODO confirm). */
static struct kmem_cache *cl_env_kmem;
/** Lock class of cl_object_header::coh_page_guard */
static struct lock_class_key cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
/* Default lu context/session tag masks, defined elsewhere (lu layer);
 * NOTE(review): consumers are not visible in this chunk. */
extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;
/**
 * Initialize cl_object_header.
 *
 * Sets up the lu-level header, the three guard spin-locks (with their
 * dedicated lockdep classes), the page radix tree and the lock list.
 *
 * \retval 0 on success
 * \retval negative errno if lu_object_header_init() fails, in which case
 *         no further initialization is performed.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int rc;

	rc = lu_object_header_init(&h->coh_lu);
	if (rc != 0)
		return rc;

	spin_lock_init(&h->coh_page_guard);
	spin_lock_init(&h->coh_lock_guard);
	spin_lock_init(&h->coh_attr_guard);
	/* Give each guard its own lockdep class so nesting between
	 * different guards of the same header can be validated. */
	lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
	lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
	lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
	h->coh_pages = 0;
	/* XXX hard coded GFP_* mask. */
	INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
	INIT_LIST_HEAD(&h->coh_locks);
	h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
	return 0;
}
EXPORT_SYMBOL(cl_object_header_init);
/**
 * Finalize cl_object_header.
 *
 * The caller must have destroyed all locks first: an assertion checks the
 * lock list is empty before the lu-level header is torn down.
 */
void cl_object_header_fini(struct cl_object_header *h)
{
	struct lu_object_header *lh = &h->coh_lu;

	LASSERT(list_empty(&h->coh_locks));
	lu_object_header_fini(lh);
}
EXPORT_SYMBOL(cl_object_header_fini);
/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either a cached or a newly created object; an additional
 * reference on the returned object is acquired by the lookup. May sleep.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	struct lu_object *slice;

	might_sleep();
	slice = lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu);
	return lu2cl(slice);
}
EXPORT_SYMBOL(cl_object_find);
/**
 * Releases a reference on \a o.
 *
 * When the last reference is dropped the object is returned to the cache,
 * unless lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE is set in its
 * header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	struct lu_object *lo = &o->co_lu;

	lu_object_put(env, lo);
}
EXPORT_SYMBOL(cl_object_put);
/**
 * Acquires an additional reference to the object \a o.
 *
 * This can only be used to take an _additional_ reference: the caller
 * must already hold at least one reference to \a o.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	struct lu_object *lo = &o->co_lu;

	lu_object_get(lo);
}
EXPORT_SYMBOL(cl_object_get);
/**
 * Returns the top-object for a given \a o.
 *
 * Walks the coh_parent chain up to the root header and returns the
 * top-most cl_object slice of it.
 *
 * \see cl_page_top(), cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr;
	struct cl_object *top;

	/* climb to the root of the parent chain */
	for (hdr = cl_object_header(o); hdr->coh_parent != NULL;
	     hdr = hdr->coh_parent)
		;
	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);
/**
* Returns pointer to the lock protecting data-attributes for the given object
* \a o.
*
* Data-attributes are protected by the cl_object_header::coh_attr_guard
* spin-lock in the top-object.
*
* \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
*/
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
/**
* Locks data-attributes.
*
* Prevents data-attributes from changing, until lock is released by
* cl_object_attr_unlock(). This has to be called before calls to
* cl_object_attr_get(), cl_object_attr_set().
*/
void cl_object_attr_lock(struct cl_object *o)
{
spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
/**
* Releases data-attributes lock, acquired by cl_object_attr_lock().
*/
void cl_object_attr_unlock(struct cl_object *o)
{
spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in parts of \a attr that this layer is responsible
 * for. A negative return from a layer aborts the walk and is returned; a
 * positive return also aborts the walk but is reported as success.
 *
 * The caller must hold the attr guard (cl_object_attr_lock()).
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result;

	/*
	 * spin_is_locked() always returns 0 on !CONFIG_SMP builds, so the
	 * old LASSERT(spin_is_locked(...)) fired spuriously on UP kernels.
	 * assert_spin_locked() is correct on both SMP and UP and is
	 * lockdep-aware.
	 */
	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				/* > 0 means "stop iteration, not an error" */
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);
/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes mentioned in the validness bit-mask \a v are updated.
 * Calls cl_object_operations::coo_attr_set() on every layer, bottom to
 * top. A negative return from a layer aborts the walk and is returned; a
 * positive return also aborts the walk but is reported as success.
 *
 * The caller must hold the attr guard (cl_object_attr_lock()).
 */
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
		       const struct cl_attr *attr, unsigned v)
{
	struct lu_object_header *top;
	int result;

	/*
	 * spin_is_locked() always returns 0 on !CONFIG_SMP builds, so the
	 * old LASSERT(spin_is_locked(...)) fired spuriously on UP kernels.
	 * assert_spin_locked() is correct on both SMP and UP and is
	 * lockdep-aware.
	 */
	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry_reverse(obj, &top->loh_layers,
				    co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_set != NULL) {
			result = obj->co_ops->coo_attr_set(env, obj, attr, v);
			if (result != 0) {
				/* > 0 means "stop iteration, not an error" */
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_set);
/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to the glimpse issuer. The walk stops at the first layer returning
 * non-zero, and that value is returned.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	struct cl_object *scan;
	int result = 0;

	list_for_each_entry_reverse(scan, &top->loh_layers,
				    co_lu.lo_linkage) {
		if (scan->co_ops->coo_glimpse != NULL) {
			result = scan->co_ops->coo_glimpse(env, scan, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
			 "ctime: "LPU64" blocks: "LPU64"\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);
/**
 * Updates the configuration of an object \a obj.
 *
 * Calls cl_object_operations::coo_conf_set() on every layer, top to
 * bottom, stopping at (and returning) the first non-zero result.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	struct cl_object *scan;
	int rc = 0;

	list_for_each_entry(scan, &top->loh_layers, co_lu.lo_linkage) {
		if (scan->co_ops->coo_conf_set == NULL)
			continue;
		rc = scan->co_ops->coo_conf_set(env, scan, conf);
		if (rc != 0)
			break;
	}
	return rc;
}
EXPORT_SYMBOL(cl_conf_set);
/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
* This is called by cl_inode_fini() and lov_object_delete() to destroy top-
* and sub- objects respectively.
*/
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(hdr->coh_pages == 0);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
/*
* Destroy all locks. Object destruction (including cl_inode_fini())
* cannot cancel the locks, because in the
cl_object.rar_objects
版权申诉
140 浏览量
2022-09-23
00:23:05
上传
评论
收藏 37KB RAR 举报
weixin_42651887
- 粉丝: 80
- 资源: 1万+
最新资源
- Anaconda3-2024.02-1-Windows-x86-64.exe
- stm32cubemx can通信 一个板子负责传感器采集 一个板子负责wifi传递到网页 网页可控制传感器板子.zip
- k8s集群搭建1.27.1版本(来源于图灵k8s笔记)
- 树莓派连接 DS18B20 温度传感器读取温度时监控客户端,服务器端连接状况,实现断线重连机制
- 小游戏-坦克大战,你认为的小游戏
- 最好用的富文本编辑器wangeditor
- jQuery 3.7.1
- 校园失物招领小程序源码可作毕业设计
- SAP客户端GUI740安装包(JAVA版本)
- winlibs-x86-64-posix-seh-gcc-13.2.0-llvm-16.0.6-mingw-w64msvcrt
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈