diff --git a/Makefile b/Makefile index d2051060c1..1f6dee240f 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -VERSION?=7.2.5 +VERSION?=7.4.0 PROJECT?=redis GH_ORG?=redis SPRING_PROFILE?=ci diff --git a/pom.xml b/pom.xml index fd73e23ebd..1f6369e3b6 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ org.springframework.data spring-data-redis - 3.5.0-SNAPSHOT + 3.5.x-GH-3054-SNAPSHOT Spring Data Redis Spring Data module for Redis diff --git a/src/main/antora/modules/ROOT/pages/appendix.adoc b/src/main/antora/modules/ROOT/pages/appendix.adoc index 46feff0611..669bf82204 100644 --- a/src/main/antora/modules/ROOT/pages/appendix.adoc +++ b/src/main/antora/modules/ROOT/pages/appendix.adoc @@ -8,193 +8,201 @@ link:https://www.springframework.org/schema/redis/spring-redis-1.0.xsd[Spring Da [[supported-commands]] == Supported Commands + .Redis commands supported by `RedisTemplate` [width="50%",cols="<2,^1",options="header"] |========================================================= |Command |Template Support -|APPEND |X -|AUTH |X -|BGREWRITEAOF |X -|BGSAVE |X -|BITCOUNT |X -|BITFIELD |X -|BITOP |X -|BLPOP |X -|BRPOP |X -|BRPOPLPUSH |X -|CLIENT KILL |X -|CLIENT GETNAME |X -|CLIENT LIST |X -|CLIENT SETNAME |X -|CLUSTER SLOTS |- -|COMMAND |- -|COMMAND COUNT |- -|COMMAND GETKEYS |- -|COMMAND INFO |- -|CONFIG GET |X -|CONFIG RESETSTAT |X -|CONFIG REWRITE |- -|CONFIG SET |X -|DBSIZE |X -|DEBUG OBJECT |- -|DEBUG SEGFAULT |- -|DECR |X -|DECRBY |X -|DEL |X -|DISCARD |X -|DUMP |X -|ECHO |X -|EVAL |X -|EVALSHA |X -|EXEC |X -|EXISTS |X -|EXPIRE |X -|EXPIREAT |X -|FLUSHALL |X -|FLUSHDB |X -|GEOADD |X -|GEODIST |X -|GEOHASH |X -|GEOPOS |X -|GEORADIUS |X -|GEORADIUSBYMEMBER |X -|GEOSEARCH |X -|GEOSEARCHSTORE |X -|GET |X -|GETBIT |X -|GETRANGE |X -|GETSET |X -|HDEL |X -|HEXISTS |X -|HGET |X -|HGETALL |X -|HINCRBY |X -|HINCRBYFLOAT |X -|HKEYS |X -|HLEN |X -|HMGET |X -|HMSET |X 
-|HSCAN |X -|HSET |X -|HSETNX |X -|HVALS |X -|INCR |X -|INCRBY |X -|INCRBYFLOAT |X -|INFO |X -|KEYS |X -|LASTSAVE |X -|LINDEX |X -|LINSERT |X -|LLEN |X -|LPOP |X -|LPUSH |X -|LPUSHX |X -|LRANGE |X -|LREM |X -|LSET |X -|LTRIM |X -|MGET |X -|MIGRATE |- -|MONITOR |- -|MOVE |X -|MSET |X -|MSETNX |X -|MULTI |X -|OBJECT |- -|PERSIST |X -|PEXIPRE |X -|PEXPIREAT |X -|PFADD |X -|PFCOUNT |X -|PFMERGE |X -|PING |X -|PSETEX |X -|PSUBSCRIBE |X -|PTTL |X -|PUBLISH |X -|PUBSUB |- -|PUBSUBSCRIBE |- -|QUIT |X -|RANDOMKEY |X -|RENAME |X -|RENAMENX |X -|REPLICAOF |X -|RESTORE |X -|ROLE |- -|RPOP |X -|RPOPLPUSH |X -|RPUSH |X -|RPUSHX |X -|SADD |X -|SAVE |X -|SCAN |X -|SCARD |X -|SCRIPT EXITS |X -|SCRIPT FLUSH |X -|SCRIPT KILL |X -|SCRIPT LOAD |X -|SDIFF |X -|SDIFFSTORE |X -|SELECT |X -|SENTINEL FAILOVER |X +|APPEND |X +|AUTH |X +|BGREWRITEAOF |X +|BGSAVE |X +|BITCOUNT |X +|BITFIELD |X +|BITOP |X +|BLPOP |X +|BRPOP |X +|BRPOPLPUSH |X +|CLIENT KILL |X +|CLIENT GETNAME |X +|CLIENT LIST |X +|CLIENT SETNAME |X +|CLUSTER SLOTS |- +|COMMAND |- +|COMMAND COUNT |- +|COMMAND GETKEYS |- +|COMMAND INFO |- +|CONFIG GET |X +|CONFIG RESETSTAT |X +|CONFIG REWRITE |- +|CONFIG SET |X +|DBSIZE |X +|DEBUG OBJECT |- +|DEBUG SEGFAULT |- +|DECR |X +|DECRBY |X +|DEL |X +|DISCARD |X +|DUMP |X +|ECHO |X +|EVAL |X +|EVALSHA |X +|EXEC |X +|EXISTS |X +|EXPIRE |X +|EXPIREAT |X +|FLUSHALL |X +|FLUSHDB |X +|GEOADD |X +|GEODIST |X +|GEOHASH |X +|GEOPOS |X +|GEORADIUS |X +|GEORADIUSBYMEMBER |X +|GEOSEARCH |X +|GEOSEARCHSTORE |X +|GET |X +|GETBIT |X +|GETRANGE |X +|GETSET |X +|HDEL |X +|HEXISTS |X +|HEXPIRE |X +|HEXPIREAT |X +|HPEXPIRE |X +|HPEXPIREAT |X +|HPERSIST |X +|HTTL |X +|HPTTL |X +|HGET |X +|HGETALL |X +|HINCRBY |X +|HINCRBYFLOAT |X +|HKEYS |X +|HLEN |X +|HMGET |X +|HMSET |X +|HSCAN |X +|HSET |X +|HSETNX |X +|HVALS |X +|INCR |X +|INCRBY |X +|INCRBYFLOAT |X +|INFO |X +|KEYS |X +|LASTSAVE |X +|LINDEX |X +|LINSERT |X +|LLEN |X +|LPOP |X +|LPUSH |X +|LPUSHX |X +|LRANGE |X +|LREM |X +|LSET |X +|LTRIM |X +|MGET |X 
+|MIGRATE |- +|MONITOR |- +|MOVE |X +|MSET |X +|MSETNX |X +|MULTI |X +|OBJECT |- +|PERSIST |X +|PEXIPRE |X +|PEXPIREAT |X +|PFADD |X +|PFCOUNT |X +|PFMERGE |X +|PING |X +|PSETEX |X +|PSUBSCRIBE |X +|PTTL |X +|PUBLISH |X +|PUBSUB |- +|PUBSUBSCRIBE |- +|QUIT |X +|RANDOMKEY |X +|RENAME |X +|RENAMENX |X +|REPLICAOF |X +|RESTORE |X +|ROLE |- +|RPOP |X +|RPOPLPUSH |X +|RPUSH |X +|RPUSHX |X +|SADD |X +|SAVE |X +|SCAN |X +|SCARD |X +|SCRIPT EXITS |X +|SCRIPT FLUSH |X +|SCRIPT KILL |X +|SCRIPT LOAD |X +|SDIFF |X +|SDIFFSTORE |X +|SELECT |X +|SENTINEL FAILOVER |X |SENTINEL GET-MASTER-ADD-BY-NAME |- -|SENTINEL MASTER | - -|SENTINEL MASTERS |X -|SENTINEL MONITOR |X -|SENTINEL REMOVE |X -|SENTINEL RESET |- -|SENTINEL SET |- -|SENTINEL SLAVES |X -|SET |X -|SETBIT |X -|SETEX |X -|SETNX |X -|SETRANGE |X -|SHUTDOWN |X -|SINTER |X -|SINTERSTORE |X -|SISMEMBER |X -|SLAVEOF |X -|SLOWLOG |- -|SMEMBERS |X -|SMOVE |X -|SORT |X -|SPOP |X -|SRANDMEMBER |X -|SREM |X -|SSCAN |X -|STRLEN |X -|SUBSCRIBE |X -|SUNION |X -|SUNIONSTORE |X -|SYNC |- -|TIME |X -|TTL |X -|TYPE |X -|UNSUBSCRIBE |X -|UNWATCH |X -|WATCH |X -|ZADD |X -|ZCARD |X -|ZCOUNT |X -|ZINCRBY |X -|ZINTERSTORE |X -|ZLEXCOUNT |- -|ZRANGE |X -|ZRANGEBYLEX |- -|ZREVRANGEBYLEX |- -|ZRANGEBYSCORE |X -|ZRANGESTORE |X -|ZRANK |X -|ZREM |X -|ZREMRANGEBYLEX |- -|ZREMRANGEBYRANK |X -|ZREVRANGE |X -|ZREVRANGEBYSCORE |X -|ZREVRANK |X -|ZSCAN |X -|ZSCORE |X -|ZUNINONSTORE |X +|SENTINEL MASTER | - +|SENTINEL MASTERS |X +|SENTINEL MONITOR |X +|SENTINEL REMOVE |X +|SENTINEL RESET |- +|SENTINEL SET |- +|SENTINEL SLAVES |X +|SET |X +|SETBIT |X +|SETEX |X +|SETNX |X +|SETRANGE |X +|SHUTDOWN |X +|SINTER |X +|SINTERSTORE |X +|SISMEMBER |X +|SLAVEOF |X +|SLOWLOG |- +|SMEMBERS |X +|SMOVE |X +|SORT |X +|SPOP |X +|SRANDMEMBER |X +|SREM |X +|SSCAN |X +|STRLEN |X +|SUBSCRIBE |X +|SUNION |X +|SUNIONSTORE |X +|SYNC |- +|TIME |X +|TTL |X +|TYPE |X +|UNSUBSCRIBE |X +|UNWATCH |X +|WATCH |X +|ZADD |X +|ZCARD |X +|ZCOUNT |X +|ZINCRBY |X +|ZINTERSTORE |X +|ZLEXCOUNT 
|- +|ZRANGE |X +|ZRANGEBYLEX |- +|ZREVRANGEBYLEX |- +|ZRANGEBYSCORE |X +|ZRANGESTORE |X +|ZRANK |X +|ZREM |X +|ZREMRANGEBYLEX |- +|ZREMRANGEBYRANK |X +|ZREVRANGE |X +|ZREVRANGEBYSCORE |X +|ZREVRANK |X +|ZSCAN |X +|ZSCORE |X +|ZUNINONSTORE |X |========================================================= diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java index 8fe2f2c9f7..17dc41378d 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultStringRedisConnection.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + import org.springframework.core.convert.converter.Converter; import org.springframework.data.geo.Circle; import org.springframework.data.geo.Distance; @@ -30,14 +31,24 @@ import org.springframework.data.geo.Metric; import org.springframework.data.geo.Point; import org.springframework.data.redis.RedisSystemException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.connection.convert.ListConverter; import org.springframework.data.redis.connection.convert.MapConverter; import org.springframework.data.redis.connection.convert.SetConverter; -import org.springframework.data.redis.connection.stream.*; +import org.springframework.data.redis.connection.stream.ByteRecord; +import org.springframework.data.redis.connection.stream.Consumer; +import org.springframework.data.redis.connection.stream.MapRecord; +import org.springframework.data.redis.connection.stream.PendingMessages; +import org.springframework.data.redis.connection.stream.PendingMessagesSummary; +import org.springframework.data.redis.connection.stream.ReadOffset; 
+import org.springframework.data.redis.connection.stream.RecordId; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoConsumers; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoGroups; import org.springframework.data.redis.connection.stream.StreamInfo.XInfoStream; +import org.springframework.data.redis.connection.stream.StreamOffset; +import org.springframework.data.redis.connection.stream.StreamReadOptions; +import org.springframework.data.redis.connection.stream.StringRecord; import org.springframework.data.redis.connection.zset.Aggregate; import org.springframework.data.redis.connection.zset.DefaultTuple; import org.springframework.data.redis.connection.zset.Tuple; @@ -2560,12 +2571,105 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return this.delegate.hScan(key, options); } - @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { return convertAndReturn(delegate.hStrLen(key, field), Converters.identityConverter()); } + public @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... fields) { + return this.delegate.applyExpiration(key, expiration, options, fields); + } + + @Override + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hExpire(key, seconds, condition, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hpExpire(key, millis, condition, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) { + return this.delegate.hExpireAt(key, unixTime, condition, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... 
fields) { + return this.delegate.hpExpireAt(key, unixTimeInMillis, condition, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return this.delegate.hPersist(key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return this.delegate.hTtl(key, fields); + } + + @Override + public List hpTtl(byte[] key, byte[]... fields) { + return this.delegate.hpTtl(key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return this.delegate.hTtl(key, timeUnit, fields); + } + + public @Nullable List applyExpiration(String key, + org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, String... fields) { + return applyExpiration(serialize(key), expiration, options, serializeMulti(fields)); + } + + @Override + public List hExpire(String key, long seconds, FieldExpirationOptions.Condition condition, String... fields) { + return hExpire(serialize(key), seconds, condition, serializeMulti(fields)); + } + + @Override + public List hpExpire(String key, long millis, FieldExpirationOptions.Condition condition, String... fields) { + return hpExpire(serialize(key), millis, condition, serializeMulti(fields)); + } + + @Override + public List hExpireAt(String key, long unixTime, FieldExpirationOptions.Condition condition, String... fields) { + return hExpireAt(serialize(key), unixTime, condition, serializeMulti(fields)); + } + + @Override + public List hpExpireAt(String key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + String... fields) { + return hpExpireAt(serialize(key), unixTimeInMillis, condition, serializeMulti(fields)); + } + + @Override + public List hPersist(String key, String... fields) { + return hPersist(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, String... 
 fields) { + return hTtl(serialize(key), serializeMulti(fields)); + } + + @Override + public List hTtl(String key, TimeUnit timeUnit, String... fields) { + return hTtl(serialize(key), timeUnit, serializeMulti(fields)); + } + + @Override + public List hpTtl(String key, String... fields) { + return hpTtl(serialize(key), serializeMulti(fields)); + } + @Override public void setClientName(byte[] name) { this.delegate.setClientName(name); diff --git a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java index aa5f6de773..460d883b41 100644 --- a/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/DefaultedRedisConnection.java @@ -28,6 +28,7 @@ import org.springframework.data.geo.GeoResults; import org.springframework.data.geo.Metric; import org.springframework.data.geo.Point; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.stream.ByteRecord; import org.springframework.data.redis.connection.stream.Consumer; import org.springframework.data.redis.connection.stream.MapRecord; @@ -65,6 +66,7 @@ * @author ihaohong * @author Dennis Neufeld * @author Shyngys Sapraliyev + * @author Tihomir Mateev * @since 2.0 */ @Deprecated @@ -1470,6 +1472,101 @@ default Long hStrLen(byte[] key, byte[] field) { return hashCommands().hStrLen(key, field); } + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpire(byte[] key, long seconds, byte[]... fields) { + return hashCommands().hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. 
*/ + @Override + @Deprecated + default List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return hashCommands().hExpire(key, seconds, condition, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpire(byte[] key, long millis, byte[]... fields) { + return hashCommands().hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + return hashCommands().hpExpire(key, millis, condition, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpireAt(byte[] key, long unixTime, byte[]... fields) { + return hashCommands().hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, + byte[]... fields) { + return hashCommands().hExpireAt(key, unixTime, condition, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... fields) { + return hashCommands().hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... 
fields) { + return hashCommands().hpExpireAt(key, unixTimeInMillis, condition, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hPersist(byte[] key, byte[]... fields) { + return hashCommands().hPersist(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, byte[]... fields) { + return hashCommands().hTtl(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return hashCommands().hTtl(key, timeUnit, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default List hpTtl(byte[] key, byte[]... fields) { + return hashCommands().hpTtl(key, fields); + } + + /** @deprecated in favor of {@link RedisConnection#hashCommands()}}. */ + @Override + @Deprecated + default @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, FieldExpirationOptions options, + byte[]... fields) { + return hashCommands().applyExpiration(key, expiration, options, fields); + } + // GEO COMMANDS /** @deprecated in favor of {@link RedisConnection#geoCommands()}}. */ @@ -1841,9 +1938,8 @@ default T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte /** @deprecated in favor of {@link RedisConnection#zSetCommands()}}. 
*/ @Override @Deprecated - default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, - org.springframework.data.domain.Range range, - org.springframework.data.redis.connection.Limit limit) { + default Long zRangeStoreByLex(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range range, + org.springframework.data.redis.connection.Limit limit) { return zSetCommands().zRangeStoreByLex(dstKey, srcKey, range, limit); } @@ -1860,7 +1956,7 @@ default Long zRangeStoreRevByLex(byte[] dstKey, byte[] srcKey, org.springframewo @Deprecated default Long zRangeStoreByScore(byte[] dstKey, byte[] srcKey, org.springframework.data.domain.Range range, - org.springframework.data.redis.connection.Limit limit) { + org.springframework.data.redis.connection.Limit limit) { return zSetCommands().zRangeStoreByScore(dstKey, srcKey, range, limit); } diff --git a/src/main/java/org/springframework/data/redis/connection/Hash.java b/src/main/java/org/springframework/data/redis/connection/Hash.java new file mode 100644 index 0000000000..51e326dd2b --- /dev/null +++ b/src/main/java/org/springframework/data/redis/connection/Hash.java @@ -0,0 +1,137 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.connection; + +import java.util.Objects; + +import org.springframework.lang.Contract; +import org.springframework.util.ObjectUtils; + +/** + * Types for interacting with Hash data structures. + * + * @author Christoph Strobl + * @since 3.5 + */ +public interface Hash { + + /** + * Expiration options for Hash Expiration updates. + */ + class FieldExpirationOptions { + + private static final FieldExpirationOptions NONE = new FieldExpirationOptions(Condition.ALWAYS); + private final Condition condition; + + FieldExpirationOptions(Condition condition) { + this.condition = condition; + } + + public static FieldExpirationOptions none() { + return NONE; + } + + public static FieldExpireOptionsBuilder builder() { + return new FieldExpireOptionsBuilder(); + } + + public Condition getCondition() { + return condition; + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FieldExpirationOptions that = (FieldExpirationOptions) o; + return ObjectUtils.nullSafeEquals(this.condition, that.condition); + } + + @Override + public int hashCode() { + return Objects.hash(condition); + } + + public static class FieldExpireOptionsBuilder { + + private Condition condition = Condition.ALWAYS; + + @Contract("-> this") + public FieldExpireOptionsBuilder nx() { + this.condition = Condition.NX; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder xx() { + this.condition = Condition.XX; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder gt() { + this.condition = Condition.GT; + return this; + } + + @Contract("-> this") + public FieldExpireOptionsBuilder lt() { + this.condition = Condition.LT; + return this; + } + + public FieldExpirationOptions build() { + return condition == Condition.ALWAYS ? 
NONE : new FieldExpirationOptions(condition); + } + + } + + public enum Condition { + + /** + * Always apply expiration. + */ + ALWAYS, + + /** + * Set expiration only when the field has no expiration. + */ + NX, + + /** + * Set expiration only when the field has an existing expiration. + */ + XX, + + /** + * Set expiration only when the new expiration is greater than current one. + */ + GT, + + /** + * Set expiration only when the new expiration is less than current one. + */ + LT + + } + + } + +} diff --git a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java index 0fae8d30b8..c463737747 100644 --- a/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/ReactiveHashCommands.java @@ -19,15 +19,19 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.reactivestreams.Publisher; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.Command; import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse; @@ -36,6 +40,7 @@ import org.springframework.data.redis.connection.ReactiveRedisConnection.MultiValueResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.core.ScanOptions; +import 
org.springframework.data.redis.core.types.Expiration; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -44,10 +49,34 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ public interface ReactiveHashCommands { + /** + * {@link Command} for hash-bound operations. + * + * @author Christoph Strobl + * @author Tihomir Mateev + */ + class HashFieldsCommand extends KeyCommand { + + private final List fields; + + private HashFieldsCommand(@Nullable ByteBuffer key, List fields) { + super(key); + this.fields = fields; + } + + /** + * @return never {@literal null}. + */ + public List getFields() { + return fields; + } + } + /** * {@literal HSET} {@link Command}. * @@ -216,15 +245,10 @@ default Mono hMSet(ByteBuffer key, Map fieldVal * @author Christoph Strobl * @see Redis Documentation: HGET */ - class HGetCommand extends KeyCommand { - - private List fields; + class HGetCommand extends HashFieldsCommand { private HGetCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -263,14 +287,7 @@ public HGetCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HGetCommand(key, fields); - } - - /** - * @return never {@literal null}. - */ - public List getFields() { - return fields; + return new HGetCommand(key, getFields()); } } @@ -394,15 +411,10 @@ default Mono hExists(ByteBuffer key, ByteBuffer field) { * @author Christoph Strobl * @see Redis Documentation: HDEL */ - class HDelCommand extends KeyCommand { - - private final List fields; + class HDelCommand extends HashFieldsCommand { private HDelCommand(@Nullable ByteBuffer key, List fields) { - - super(key); - - this.fields = fields; + super(key, fields); } /** @@ -441,14 +453,7 @@ public HDelCommand from(ByteBuffer key) { Assert.notNull(key, "Key must not be null"); - return new HDelCommand(key, fields); - } - - /** - * @return never {@literal null}. 
- */ - public List getFields() { - return fields; + return new HDelCommand(key, getFields()); } } @@ -842,4 +847,412 @@ default Mono hStrLen(ByteBuffer key, ByteBuffer field) { * @since 2.1 */ Flux> hStrLen(Publisher commands); + + /** + * @since 3.5 + */ + class ExpireCommand extends HashFieldsCommand { + + private final Expiration expiration; + private final FieldExpirationOptions options; + + private ExpireCommand(@Nullable ByteBuffer key, List fields, Expiration expiration, + FieldExpirationOptions options) { + + super(key, fields); + + this.expiration = expiration; + this.options = options; + } + + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to + * @param timeout the actual timeout + * @param unit the unit of measure for the {@code timeout}. + * @return new instance of {@link ExpireCommand}. + */ + public static ExpireCommand expire(List fields, long timeout, TimeUnit unit) { + + Assert.notNull(fields, "Field must not be null"); + return expire(fields, Expiration.from(timeout, unit)); + } + + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to. + * @param ttl the actual timeout. + * @return new instance of {@link ExpireCommand}. + */ + public static ExpireCommand expire(List fields, Duration ttl) { + + Assert.notNull(fields, "Field must not be null"); + return expire(fields, Expiration.from(ttl)); + } + + /** + * Creates a new {@link ExpireCommand}. + * + * @param fields the {@code field} names to apply expiration to + * @param expiration the {@link Expiration} to apply to the given {@literal fields}. + * @return new instance of {@link ExpireCommand}. + */ + public static ExpireCommand expire(List fields, Expiration expiration) { + return new ExpireCommand(null, fields, expiration, FieldExpirationOptions.none()); + } + + /** + * Creates a new {@link ExpireCommand}. 
+ * + * @param fields the {@code field} names to apply expiration to + * @param ttl the unix point in time when to expire the given {@literal fields}. + * @param precision can be {@link TimeUnit#SECONDS} or {@link TimeUnit#MILLISECONDS}. + * @return new instance of {@link ExpireCommand}. + */ + public static ExpireCommand expireAt(List fields, Instant ttl, TimeUnit precision) { + + if (precision.compareTo(TimeUnit.MILLISECONDS) > 0) { + return expire(fields, Expiration.unixTimestamp(ttl.getEpochSecond(), TimeUnit.SECONDS)); + } + + return expire(fields, Expiration.unixTimestamp(ttl.toEpochMilli(), TimeUnit.MILLISECONDS)); + } + + /** + * @param key the {@literal key} from which to expire the {@literal fields} from. + * @return new instance of {@link ExpireCommand}. + */ + public ExpireCommand from(ByteBuffer key) { + return new ExpireCommand(key, getFields(), expiration, options); + } + + /** + * @param options additional options to be sent along with the command. + * @return new instance of {@link ExpireCommand}. + */ + public ExpireCommand withOptions(FieldExpirationOptions options) { + return new ExpireCommand(getKey(), getFields(), getExpiration(), options); + } + + public Expiration getExpiration() { + return expiration; + } + + public FieldExpirationOptions getOptions() { + return options; + } + } + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. 
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + + Assert.notNull(duration, "Duration must not be null"); + + return hExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hExpire(ByteBuffer key, Duration duration, List fields) { + + Assert.notNull(duration, "Duration must not be null"); + + return applyExpiration(Flux.just(ExpireCommand.expire(fields, duration).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. + * + * @param commands must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; + * @since 3.5 + * @see Redis Documentation: HEXPIRE + */ + Flux> applyExpiration(Publisher commands); + + /** + * Expire a given {@literal field} after a given {@link Duration} of time, measured in milliseconds, has passed. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param duration must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time is set/updated; + * {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is not met); + * {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Mono hpExpire(ByteBuffer key, Duration duration, ByteBuffer field) { + + Assert.notNull(duration, "Duration must not be null"); + + return hpExpire(key, duration, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} after a given {@link Duration} of time, measured in milliseconds, has + * passed. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param duration must not be {@literal null}. 
+ * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + default Flux hpExpire(ByteBuffer key, Duration duration, List fields) { + + Assert.notNull(duration, "Duration must not be null"); + + return applyExpiration(Flux.just(new ExpireCommand(key, fields, + Expiration.from(duration.toMillis(), TimeUnit.MILLISECONDS), FieldExpirationOptions.none()))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is + * not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Mono hExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + + return hExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in seconds since Unix epoch + * + * @param key must not be {@literal null}. 
+ * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + default Flux hExpireAt(ByteBuffer key, Instant expireAt, List fields) { + + Assert.notNull(expireAt, "Duration must not be null"); + + return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.SECONDS).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Expire a given {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @param expireAt must not be {@literal null}. 
+ * @return a {@link Mono} emitting the expiration result - {@code 2} indicating the specific field is deleted already + * due to expiration, or provided expiry interval is in the past; {@code 1} indicating expiration time is + * set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition is + * not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Mono hpExpireAt(ByteBuffer key, Instant expireAt, ByteBuffer field) { + + Assert.notNull(expireAt, "Duration must not be null"); + + return hpExpireAt(key, expireAt, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Expire a {@link List} of {@literal field} in a given {@link Instant} of time, indicated as an absolute + * Unix timestamp in milliseconds since Unix epoch + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @return a {@link Flux} emitting the expiration results one by one, {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + default Flux hpExpireAt(ByteBuffer key, Instant expireAt, List fields) { + + Assert.notNull(expireAt, "Duration must not be null"); + + return applyExpiration(Flux.just(ExpireCommand.expireAt(fields, expireAt, TimeUnit.MILLISECONDS).from(key))) + .mapNotNull(NumericResponse::getOutput); + } + + /** + * Persist a given {@literal field} removing any associated expiration. + * + * @param key must not be {@literal null}. 
+ * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the persist result - {@code 1} indicating expiration time is removed; {@code -1} + * field has no expiration time to be removed; {@code -2} indicating there is no such field; + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Mono hPersist(ByteBuffer key, ByteBuffer field) { + return hPersist(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + default Flux hPersist(ByteBuffer key, List fields) { + return hPersist(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Persist a given {@link List} of {@literal field} removing any associated expiration. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; * @since 3.5 + * @see Redis Documentation: HPERSIST + */ + Flux> hPersist(Publisher commands); + + /** + * Returns the time-to-live of a given {@literal field} in seconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in seconds; or a negative value to signal an + * error. The command returns {@code -1} if the key exists but has no associated expiration time. 
The command + * returns {@code -2} if the key does not exist; + * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Mono hTtl(ByteBuffer key, ByteBuffer field) { + return hTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in seconds; or a negative value to + * signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HTTL + * @since 3.5 + */ + default Flux hTtl(ByteBuffer key, List fields) { + return hTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in seconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; + * @since 3.5 + * @see Redis Documentation: HTTL + */ + Flux> hTtl(Publisher commands); + + /** + * Returns the time-to-live of a given {@literal field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param field must not be {@literal null}. + * @return a {@link Mono} emitting the TTL result - the time to live in milliseconds; or a negative value to signal an + * error. The command returns {@code -1} if the key exists but has no associated expiration time. 
The command + * returns {@code -2} if the key does not exist; + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Mono hpTtl(ByteBuffer key, ByteBuffer field) { + return hpTtl(key, Collections.singletonList(field)).singleOrEmpty(); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a {@link Flux} emitting the TTL results one by one - the time to live in milliseconds; or a negative value + * to signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + default Flux hpTtl(ByteBuffer key, List fields) { + return hpTtl(Flux.just(new HashFieldsCommand(key, fields))).mapNotNull(NumericResponse::getOutput); + } + + /** + * Returns the time-to-live of all the given {@literal field} in the {@link List} in milliseconds. + * + * @param commands must not be {@literal null}. + * @return a {@link Flux} emitting the persisting results one by one - the time to live in milliseconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. 
The command returns {@code -2} if the key does not exist; + * @since 3.5 + * @see Redis Documentation: HPTTL + */ + Flux> hpTtl(Publisher commands); + } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java index 6385c56a57..d038708526 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisHashCommands.java @@ -15,13 +15,17 @@ */ package org.springframework.data.redis.connection; +import java.time.Duration; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; +import org.springframework.util.ObjectUtils; /** * Hash-specific commands supported by Redis. @@ -29,6 +33,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public interface RedisHashCommands { @@ -249,4 +254,314 @@ public interface RedisHashCommands { */ @Nullable Long hStrLen(byte[] key, byte[] field); + + /** + * Apply a given {@link org.springframework.data.redis.core.types.Expiration} to the given {@literal fields}. + * + * @param key must not be {@literal null}. + * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply. + * @param fields the names of the {@literal fields} to apply the {@literal expiration} to. 
+ * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field + * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration + * time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no + * such field; + * @since 3.5 + */ + default @Nullable List applyExpiration(byte[] key, + org.springframework.data.redis.core.types.Expiration expiration, byte[]... fields) { + return applyExpiration(key, expiration, FieldExpirationOptions.none(), fields); + } + + /** + * @param key must not be {@literal null}. + * @param expiration the {@link org.springframework.data.redis.core.types.Expiration} to apply. + * @param options additional options to be sent along with the command. + * @param fields the names of the {@literal fields} to apply the {@literal expiration} to. + * @return a {@link List} holding the command result for each field in order - {@code 2} indicating the specific field + * is deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration + * time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT + * condition is not met); {@code -2} indicating there is no such field; + * @since 3.5 + */ + @Nullable + default List applyExpiration(byte[] key, org.springframework.data.redis.core.types.Expiration expiration, + FieldExpirationOptions options, byte[]... 
fields) { + + if (expiration.isPersistent()) { + return hPersist(key, fields); + } + + if (ObjectUtils.nullSafeEquals(FieldExpirationOptions.none(), options)) { + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), fields); + } + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), fields); + } + return hExpire(key, expiration.getExpirationTimeInSeconds(), fields); + } + + if (ObjectUtils.nullSafeEquals(TimeUnit.MILLISECONDS, expiration.getTimeUnit())) { + if (expiration.isUnixTimestamp()) { + return hpExpireAt(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields); + } + + return hpExpire(key, expiration.getExpirationTimeInMilliseconds(), options.getCondition(), fields); + } + + if (expiration.isUnixTimestamp()) { + return hExpireAt(key, expiration.getExpirationTimeInSeconds(), options.getCondition(), fields); + } + + return hExpire(key, expiration.getExpirationTimeInSeconds(), options.getCondition(), fields); + } + + /** + * Set time to live for given {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + default List hExpire(byte[] key, long seconds, byte[]... fields) { + return hExpire(key, seconds, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code fields}. + * + * @param key must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toSeconds() seconds} + * precision, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + default List hExpire(byte[] key, Duration ttl, byte[]... fields) { + return hExpire(key, ttl.toSeconds(), fields); + } + + /** + * Set time to live for given {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the fields will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields); + + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the fields will be expired in milliseconds, must not be + * {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set ; {@code -2} indicating there is no + * such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + default List hpExpire(byte[] key, long millis, byte[]... fields) { + return hpExpire(key, millis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param ttl the amount of time after which the fields will be expired in {@link Duration#toMillis() milliseconds} + * precision, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + default List hpExpire(byte[] key, Duration ttl, byte[]... 
fields) { + return hpExpire(key, ttl.toMillis(), fields); + } + + /** + * Set time to live for given {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the fields will be expired in milliseconds, must not be + * {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + @Nullable + default List hExpireAt(byte[] key, long unixTime, byte[]... 
fields) { + return hExpireAt(key, unixTime, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + @Nullable + List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + default List hpExpireAt(byte[] key, long unixTimeInMillis, byte[]... 
fields) { + return hpExpireAt(key, unixTimeInMillis, FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param condition the condition for expiration, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction.{@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List hPersist(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a negative + * value to signal an error. The command returns {@code -1} if the field exists but has no associated + * expiration time. The command returns {@code -2} if the field does not exist; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hTtl(byte[] key, byte[]... fields); + + /** + * Get the time to live for {@code fields} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return for each of the fields supplied - the time to live in the {@link TimeUnit} provided; or a negative value to + * signal an error. The command returns {@code -1} if the key exists but has no associated expiration time. + * The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields); + + /** + * Get the time to live for {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a negative + * value to signal an error. The command returns {@code -1} if the key exists but has no associated expiration + * time. The command returns {@code -2} if the key does not exist; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPTTL + * @since 3.5 + */ + @Nullable + List hpTtl(byte[] key, byte[]... 
fields); } diff --git a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java index 414f178d92..49326637d3 100644 --- a/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/RedisKeyCommands.java @@ -16,6 +16,7 @@ package org.springframework.data.redis.connection; import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -191,6 +192,20 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean expire(byte[] key, long seconds); + /** + * Set time to live for given {@code key} using {@link Duration#toSeconds() seconds} precision. + * + * @param key must not be {@literal null}. + * @param duration + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: EXPIRE + * @since 3.5 + */ + @Nullable + default Boolean expire(byte[] key, Duration duration) { + return expire(key, duration.toSeconds()); + } + /** * Set time to live for given {@code key} in milliseconds. * @@ -202,6 +217,20 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean pExpire(byte[] key, long millis); + /** + * Set time to live for given {@code key} using {@link Duration#toMillis() milliseconds} precision. + * + * @param key must not be {@literal null}. + * @param duration + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: PEXPIRE + * @since 3.5 + */ + @Nullable + default Boolean pExpire(byte[] key, Duration duration) { + return pExpire(key, duration.toMillis()); + } + /** * Set the expiration for given {@code key} as a {@literal UNIX} timestamp. 
* @@ -213,6 +242,21 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean expireAt(byte[] key, long unixTime); + /** + * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#getEpochSecond() seconds} + * precision. + * + * @param key must not be {@literal null}. + * @param unixTime + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: EXPIREAT + * @since 3.5 + */ + @Nullable + default Boolean expireAt(byte[] key, Instant unixTime) { + return expireAt(key, unixTime.getEpochSecond()); + } + /** * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in milliseconds. * @@ -224,6 +268,21 @@ default Cursor scan(KeyScanOptions options) { @Nullable Boolean pExpireAt(byte[] key, long unixTimeInMillis); + /** + * Set the expiration for given {@code key} as a {@literal UNIX} timestamp in {@link Instant#toEpochMilli() + * milliseconds} precision. + * + * @param key must not be {@literal null}. + * @param unixTime + * @return {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: PEXPIREAT + * @since 3.5 + */ + @Nullable + default Boolean pExpireAt(byte[] key, Instant unixTime) { + return pExpireAt(key, unixTime.toEpochMilli()); + } + /** * Remove the expiration from given {@code key}. 
* diff --git a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java index 2c286ce97e..1069e430c8 100644 --- a/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/StringRedisConnection.java @@ -71,7 +71,6 @@ * @author Andrey Shlykov * @author ihaohong * @author Shyngys Sapraliyev - * * @see RedisCallback * @see RedisSerializer * @see StringRedisTemplate @@ -1661,7 +1660,6 @@ default Long lPos(String key, String element) { */ Long zRemRange(String key, long start, long end); - /** * Remove all elements between the lexicographical {@link Range}. * @@ -1941,7 +1939,8 @@ default Set zUnionWithScores(Aggregate aggregate, int[] weights, St * @return * @since 1.6 * @see Redis Documentation: ZRANGEBYLEX - * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit) + * @see RedisZSetCommands#zRangeByLex(byte[], org.springframework.data.domain.Range, + * org.springframework.data.redis.connection.Limit) */ Set zRangeByLex(String key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit); @@ -1983,7 +1982,8 @@ default Set zRevRangeByLex(String key, org.springframework.data.domain.R * @return * @since 2.4 * @see Redis Documentation: ZREVRANGEBYLEX - * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range, org.springframework.data.redis.connection.Limit) + * @see RedisZSetCommands#zRevRangeByLex(byte[], org.springframework.data.domain.Range, + * org.springframework.data.redis.connection.Limit) */ Set zRevRangeByLex(String key, org.springframework.data.domain.Range range, org.springframework.data.redis.connection.Limit limit); @@ -2333,6 +2333,208 @@ Long zRangeStoreRevByScore(String dstKey, String srcKey, @Nullable Long 
hStrLen(String key, String field); + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + default List hExpire(String key, long seconds, String... fields) { + return hExpire(key, seconds, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code field} in seconds. + * + * @param key must not be {@literal null}. + * @param seconds the amount of time after which the key will be expired in seconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + List hExpire(String key, long seconds, Hash.FieldExpirationOptions.Condition condition, String... fields); + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. 
+ * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + default List hpExpire(String key, long millis, String... fields) { + return hpExpire(key, millis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set time to live for given {@code field} in milliseconds. + * + * @param key must not be {@literal null}. + * @param millis the amount of time after which the key will be expired in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is 0; {@code 1} indicating expiration time + * is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | GT | LT condition + * is not met); {@code -2} indicating there is no such field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPEXPIRE + * @since 3.5 + */ + @Nullable + List hpExpire(String key, long millis, Hash.FieldExpirationOptions.Condition condition, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. 
+ * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + @Nullable + default List hExpireAt(String key, long unixTime, String... fields) { + return hExpireAt(key, unixTime, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp. + * + * @param key must not be {@literal null}. + * @param unixTime the moment in time in which the field expires, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HEXPIREAT + * @since 3.5 + */ + @Nullable + List hExpireAt(String key, long unixTime, Hash.FieldExpirationOptions.Condition condition, String... fields); + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set; {@code -2} indicating + * there is no such field; {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + default List hpExpireAt(String key, long unixTimeInMillis, String... fields) { + return hpExpireAt(key, unixTimeInMillis, Hash.FieldExpirationOptions.Condition.ALWAYS, fields); + } + + /** + * Set the expiration for given {@code field} as a {@literal UNIX} timestamp in milliseconds. + * + * @param key must not be {@literal null}. + * @param unixTimeInMillis the moment in time in which the field expires in milliseconds, must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: {@code 2} indicating the specific field is + * deleted already due to expiration, or provided expiry interval is in the past; {@code 1} indicating + * expiration time is set/updated; {@code 0} indicating the expiration time is not set (a provided NX | XX | + * GT | LT condition is not met); {@code -2} indicating there is no such field; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HPEXPIREAT + * @since 3.5 + */ + @Nullable + List hpExpireAt(String key, long unixTimeInMillis, Hash.FieldExpirationOptions.Condition condition, + String... fields); + + /** + * Remove the expiration from given {@code field}. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: {@code 1} indicating expiration time is + * removed; {@code -1} field has no expiration time to be removed; {@code -2} indicating there is no such + * field; {@literal null} when used in pipeline / + * transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + List hPersist(String key, String... fields); + + /** + * Get the time to live for {@code fields} in seconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in seconds; or a + * negative value to signal an error. The command returns {@code -1} if the key exists but has no associated + * expiration time. The command returns {@code -2} if the key does not exist; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hTtl(String key, String... fields); + + /** + * Get the time to live for {@code fields} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param fields must not be {@literal null}. + * @return a list of {@link Long} values for each of the fields provided: the time to live in the {@link TimeUnit} + * provided; or a negative value to signal an error. The command returns {@code -1} if the key exists but has + * no associated expiration time. The command returns {@code -2} if the key does not exist; {@literal null} + * when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hTtl(String key, TimeUnit timeUnit, String... fields); + + /** + * Get the time to live for {@code fields} in milliseconds. + * + * @param key must not be {@literal null}. + * @param fields must not be {@literal null}. 
+ * @return a list of {@link Long} values for each of the fields provided: the time to live in milliseconds; or a + * negative value to signal an error. The command returns {@code -1} if the key exists but has no associated + * expiration time. The command returns {@code -2} if the key does not exist; {@literal null} when used in + * pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + List hpTtl(String key, String... fields); + // ------------------------------------------------------------------------- // Methods dealing with HyperLogLog // ------------------------------------------------------------------------- @@ -2556,8 +2758,7 @@ GeoResults> geoRadiusByMember(String key, String member, Dis /** * Return the members of a geo set which are within the borders of the area specified by a given {@link GeoShape - * shape}. The query's center point is provided by - * {@link GeoReference}. + * shape}. The query's center point is provided by {@link GeoReference}. * * @param key must not be {@literal null}. * @param reference must not be {@literal null}. @@ -2573,8 +2774,7 @@ GeoResults> geoSearch(String key, GeoReference refer /** * Query the members of a geo set which are within the borders of the area specified by a given {@link GeoShape shape} - * and store the result at {@code destKey}. The query's center point is provided by - * {@link GeoReference}. + * and store the result at {@code destKey}. The query's center point is provided by {@link GeoReference}. * * @param key must not be {@literal null}. * @param reference must not be {@literal null}. 
diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java index 47ad6c6eec..1223ab4c06 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisClusterHashCommands.java @@ -15,6 +15,7 @@ */ package org.springframework.data.redis.connection.jedis; +import redis.clients.jedis.args.ExpiryOption; import redis.clients.jedis.params.ScanParams; import redis.clients.jedis.resps.ScanResult; @@ -23,8 +24,10 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.DataAccessException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.ScanCursor; @@ -39,6 +42,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisClusterHashCommands implements RedisHashCommands { @@ -279,14 +283,136 @@ protected ScanIteration> doScan(CursorId cursorId, ScanOpt ScanParams params = JedisConverters.toScanParams(options); - ScanResult> result = connection.getCluster().hscan(key, - JedisConverters.toBytes(cursorId), + ScanResult> result = connection.getCluster().hscan(key, JedisConverters.toBytes(cursorId), params); return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult()); } }.open(); } + @Override + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... 
fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hexpire(key, seconds, fields); + } + + return connection.getCluster().hexpire(key, seconds, ExpiryOption.valueOf(condition.name()), fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hpexpire(key, millis, fields); + } + + return connection.getCluster().hpexpire(key, millis, ExpiryOption.valueOf(condition.name()), fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... fields) { + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hexpireAt(key, unixTime, fields); + } + + return connection.getCluster().hexpireAt(key, unixTime, ExpiryOption.valueOf(condition.name()), fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... 
fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, fields); + } + + return connection.getCluster().hpexpireAt(key, unixTimeInMillis, ExpiryOption.valueOf(condition.name()), fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpersist(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().httl(key, fields).stream() + .map(it -> it != null ? timeUnit.convert(it, TimeUnit.SECONDS) : null).toList(); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + + @Override + public List hpTtl(byte[] key, byte[]... 
fields) { + + Assert.notNull(key, "Key must not be null"); + Assert.notNull(fields, "Fields must not be null"); + + try { + return connection.getCluster().hpttl(key, fields); + } catch (Exception ex) { + throw convertJedisAccessException(ex); + } + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { @@ -298,7 +424,7 @@ public Long hStrLen(byte[] key, byte[] field) { } private DataAccessException convertJedisAccessException(Exception ex) { - return connection.convertJedisAccessException(ex); } + } diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java index a0ac8debf2..2e83d8aba0 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisHashCommands.java @@ -16,6 +16,7 @@ package org.springframework.data.redis.connection.jedis; import redis.clients.jedis.Jedis; +import redis.clients.jedis.args.ExpiryOption; import redis.clients.jedis.commands.PipelineBinaryCommands; import redis.clients.jedis.params.ScanParams; import redis.clients.jedis.resps.ScanResult; @@ -25,8 +26,10 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Cursor; @@ -43,6 +46,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author John Blum + * @author Tihomir Mateev * @since 2.0 */ class JedisHashCommands implements RedisHashCommands { @@ -150,7 +154,8 @@ public List> hRandFieldWithValues(byte[] key, long count) List> convertedMapEntryList = new 
ArrayList<>(mapEntryList.size()); - mapEntryList.forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue()))); + mapEntryList + .forEach(entry -> convertedMapEntryList.add(Converters.entryOf(entry.getKey(), entry.getValue()))); return convertedMapEntryList; @@ -237,8 +242,8 @@ protected ScanIteration> doScan(byte[] key, CursorId curso ScanParams params = JedisConverters.toScanParams(options); - ScanResult> result = connection.getJedis().hscan(key, - JedisConverters.toBytes(cursorId), params); + ScanResult> result = connection.getJedis().hscan(key, JedisConverters.toBytes(cursorId), + params); return new ScanIteration<>(CursorId.of(result.getCursor()), result.getResult()); } @@ -250,6 +255,74 @@ protected void doClose() { }.open(); } + @Override + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, fields); + } + + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hexpire, PipelineBinaryCommands::hexpire, key, seconds, option, fields); + } + + @Override + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, fields); + } + + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hpexpire, PipelineBinaryCommands::hpexpire, key, millis, option, fields); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... 
fields) { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, fields); + } + + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hexpireAt, PipelineBinaryCommands::hexpireAt, key, unixTime, option, fields); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { + + if (condition == FieldExpirationOptions.Condition.ALWAYS) { + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, + fields); + } + + ExpiryOption option = ExpiryOption.valueOf(condition.name()); + return connection.invoke().just(Jedis::hpexpireAt, PipelineBinaryCommands::hpexpireAt, key, unixTimeInMillis, + option, fields); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::hpersist, PipelineBinaryCommands::hpersist, key, fields); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().just(Jedis::httl, PipelineBinaryCommands::httl, key, fields); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return connection.invoke().fromMany(Jedis::httl, PipelineBinaryCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } + + @Override + public List hpTtl(byte[] key, byte[]... 
fields) { + return connection.invoke().just(Jedis::hpttl, PipelineBinaryCommands::hpttl, key, fields); + } + @Nullable @Override public Long hStrLen(byte[] key, byte[] field) { diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java index fc8460d514..fe646b3a1b 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConnection.java @@ -1160,6 +1160,14 @@ static class TypeHints { COMMAND_OUTPUT_TYPE_MAPPING.put(PFMERGE, IntegerOutput.class); COMMAND_OUTPUT_TYPE_MAPPING.put(PFADD, IntegerOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIRE, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HEXPIREAT, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIRE, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPEXPIREAT, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPERSIST, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HTTL, IntegerListOutput.class); + COMMAND_OUTPUT_TYPE_MAPPING.put(HPTTL, IntegerListOutput.class); + // DOUBLE COMMAND_OUTPUT_TYPE_MAPPING.put(HINCRBYFLOAT, DoubleOutput.class); COMMAND_OUTPUT_TYPE_MAPPING.put(INCRBYFLOAT, DoubleOutput.class); diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java index e4b53f4fb4..032d6230d6 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceHashCommands.java @@ -15,17 +15,21 @@ */ package org.springframework.data.redis.connection.lettuce; +import io.lettuce.core.ExpireArgs; import io.lettuce.core.KeyValue; import io.lettuce.core.MapScanCursor; 
import io.lettuce.core.ScanArgs; import io.lettuce.core.api.async.RedisHashAsyncCommands; +import io.lettuce.core.protocol.CommandArgs; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisHashCommands; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.core.Cursor; @@ -35,10 +39,12 @@ import org.springframework.data.redis.core.ScanOptions; import org.springframework.lang.Nullable; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev * @since 2.0 */ class LettuceHashCommands implements RedisHashCommands { @@ -208,6 +214,51 @@ public Cursor> hScan(byte[] key, ScanOptions options) { return hScan(key, CursorId.initial(), options); } + @Override + public List hExpire(byte[] key, long seconds, FieldExpirationOptions.Condition condition, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hexpire, key, seconds, getExpireArgs(condition), fields) + .toList(); + } + + @Override + public List hpExpire(byte[] key, long millis, FieldExpirationOptions.Condition condition, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpexpire, key, millis, getExpireArgs(condition), fields) + .toList(); + } + + @Override + public List hExpireAt(byte[] key, long unixTime, FieldExpirationOptions.Condition condition, byte[]... 
fields) { + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hexpireat, key, unixTime, getExpireArgs(condition), fields).toList(); + } + + @Override + public List hpExpireAt(byte[] key, long unixTimeInMillis, FieldExpirationOptions.Condition condition, + byte[]... fields) { + return connection.invoke() + .fromMany(RedisHashAsyncCommands::hpexpireat, key, unixTimeInMillis, getExpireArgs(condition), fields).toList(); + } + + @Override + public List hPersist(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpersist, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields).toList(); + } + + @Override + public List hTtl(byte[] key, TimeUnit timeUnit, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::httl, key, fields) + .toList(Converters.secondsToTimeUnit(timeUnit)); + } + + @Override + public List hpTtl(byte[] key, byte[]... fields) { + return connection.invoke().fromMany(RedisHashAsyncCommands::hpttl, key, fields).toList(); + } /** * @param key @@ -263,4 +314,19 @@ private static Entry toEntry(KeyValue value) { return value.hasValue() ? 
Converters.entryOf(value.getKey(), value.getValue()) : null; } + private ExpireArgs getExpireArgs(FieldExpirationOptions.Condition condition) { + + return new ExpireArgs() { + @Override + public void build(CommandArgs args) { + + if (ObjectUtils.nullSafeEquals(condition, FieldExpirationOptions.Condition.ALWAYS)) { + return; + } + + args.add(condition.name()); + } + }; + } + } diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java index b704321ef5..84dd2ca906 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommands.java @@ -15,8 +15,10 @@ */ package org.springframework.data.redis.connection.lettuce; +import io.lettuce.core.ExpireArgs; import io.lettuce.core.KeyValue; import io.lettuce.core.ScanStream; +import io.lettuce.core.protocol.CommandArgs; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -25,10 +27,11 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.reactivestreams.Publisher; - +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveHashCommands; import org.springframework.data.redis.connection.ReactiveRedisConnection.BooleanResponse; import org.springframework.data.redis.connection.ReactiveRedisConnection.CommandResponse; @@ -38,6 +41,7 @@ import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.connection.convert.Converters; import org.springframework.util.Assert; +import org.springframework.util.ObjectUtils; /** * @author Christoph Strobl @@ -264,6 +268,93 @@ public Flux> 
hStrLen(Publisher> applyExpiration(Publisher commands) { + + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + ByteBuffer[] fields = command.getFields().toArray(ByteBuffer[]::new); + + if (command.getExpiration().isPersistent()) { + return cmd.hpersist(command.getKey(), fields).map(value -> new NumericResponse<>(command, value)); + } + + ExpireArgs args = new ExpireArgs() { + + @Override + public void build(CommandArgs args) { + super.build(args); + if (ObjectUtils.nullSafeEquals(command.getOptions(), FieldExpirationOptions.none())) { + return; + } + + args.add(command.getOptions().getCondition().name()); + } + }; + + if (command.getExpiration().isUnixTimestamp()) { + + if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) { + return cmd + .hpexpireat(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + } + return cmd.hexpireat(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + } + + if (command.getExpiration().getTimeUnit().equals(TimeUnit.MILLISECONDS)) { + return cmd.hpexpire(command.getKey(), command.getExpiration().getExpirationTimeInMilliseconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + } + + return cmd.hexpire(command.getKey(), command.getExpiration().getExpirationTimeInSeconds(), args, fields) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hPersist(Publisher commands) { + + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpersist(command.getKey(), 
command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hTtl(Publisher commands) { + + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.httl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + + @Override + public Flux> hpTtl(Publisher commands) { + + return connection.execute(cmd -> Flux.from(commands).concatMap(command -> { + + Assert.notNull(command.getKey(), "Key must not be null"); + Assert.notNull(command.getFields(), "Fields must not be null"); + + return cmd.hpttl(command.getKey(), command.getFields().toArray(ByteBuffer[]::new)) + .map(value -> new NumericResponse<>(command, value)); + })); + } + private static Map.Entry toEntry(KeyValue kv) { return new Entry() { diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java new file mode 100644 index 0000000000..27779a88a8 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/BoundHashFieldExpirationOperations.java @@ -0,0 +1,111 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core; + +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.TimeUnit; + +import org.springframework.data.redis.connection.Hash; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.lang.Nullable; + +/** + * Hash Field Expiration operations bound to a certain hash key and set of hash fields. + * + * @param type of the hash field names. + * @author Mark Paluch + * @since 3.5 + */ +public interface BoundHashFieldExpirationOperations { + + /** + * Apply {@link Expiration} to the hash without any additional constraints. + * + * @param expiration the expiration definition. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + */ + default ExpireChanges expire(Expiration expiration) { + return expire(expiration, Hash.FieldExpirationOptions.none()); + } + + /** + * Apply {@link Expiration} to the hash fields given {@link Hash.FieldExpirationOptions expiration options}. + * + * @param expiration the expiration definition. + * @param options expiration options. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + */ + ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options); + + /** + * Set time to live for given {@code hashKey}. + * + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. 
+ * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expire(Duration timeout); + + /** + * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. + * + * @param expireAt must not be {@literal null}. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expireAt(Instant expireAt); + + /** + * Remove the expiration from given {@code hashKey} . + * + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + ExpireChanges persist(); + + /** + * Get the time to live for {@code hashKey} in seconds. + * + * @return the actual expirations in seconds for the hash fields. {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Expirations getTimeToLive(); + + /** + * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. + * + * @param timeUnit must not be {@literal null}. + * @return the actual expirations for the hash fields in the given time unit. {@literal null} when used in pipeline / + * transaction. 
+ * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Expirations getTimeToLive(TimeUnit timeUnit); + +} diff --git a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java index f906462911..0d287e929f 100644 --- a/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/BoundHashOperations.java @@ -15,6 +15,7 @@ */ package org.springframework.data.redis.core; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -29,6 +30,7 @@ * @author Christoph Strobl * @author Ninad Divadkar * @author Mark Paluch + * @author Tihomir Mateev */ public interface BoundHashOperations extends BoundKeyOperations { @@ -90,7 +92,7 @@ public interface BoundHashOperations extends BoundKeyOperations { Double increment(HK key, double delta); /** - * Return a random key (aka field) from the hash stored at the bound key. + * Return a random key from the hash stored at the bound key. * * @return {@literal null} if the hash does not exist or when used in pipeline / transaction. * @since 2.6 @@ -110,10 +112,10 @@ public interface BoundHashOperations extends BoundKeyOperations { Map.Entry randomEntry(); /** - * Return a random keys (aka fields) from the hash stored at the bound key. If the provided {@code count} argument is - * positive, return a list of distinct keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same key multiple times. In this case, the - * number of returned keys is the absolute value of the specified count. + * Return a random keys from the hash stored at the bound key. If the provided {@code count} argument is positive, + * return a list of distinct keys, capped either at {@code count} or the hash size. 
If {@code count} is negative, the + * behavior changes and the command is allowed to return the same key multiple times. In this case, the number of + * returned keys is the absolute value of the specified count. * * @param count number of keys to return. * @return {@literal null} if key does not exist or when used in pipeline / transaction. @@ -213,8 +215,45 @@ public interface BoundHashOperations extends BoundKeyOperations { */ Cursor> scan(ScanOptions options); + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration + * operation. + * + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration() { + return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), this::keys); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(HK... hashFields) { + return expiration(Arrays.asList(hashFields)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. 
+ * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return new DefaultBoundHashFieldExpirationOperations<>(getOperations().opsForHash(), getKey(), () -> hashFields); + } + /** * @return never {@literal null}. */ RedisOperations getOperations(); + } diff --git a/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java new file mode 100644 index 0000000000..8dbabe6dd5 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/DefaultBoundHashFieldExpirationOperations.java @@ -0,0 +1,93 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core; + +import java.time.Duration; +import java.time.Instant; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import org.springframework.data.redis.connection.Hash; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.lang.Nullable; +import org.springframework.util.Assert; + +/** + * Default implementation of {@link BoundHashFieldExpirationOperations}. 
+ * + * @author Mark Paluch + * @since 3.5 + */ +class DefaultBoundHashFieldExpirationOperations implements BoundHashFieldExpirationOperations { + + private final HashOperations operations; + private final H key; + private final Supplier> hashFields; + + public DefaultBoundHashFieldExpirationOperations(HashOperations operations, H key, + Supplier> hashFields) { + + this.operations = operations; + this.key = key; + this.hashFields = hashFields; + } + + @Override + public ExpireChanges expire(Expiration expiration, Hash.FieldExpirationOptions options) { + return operations.expire(key, expiration, options, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges expire(Duration timeout) { + return operations.expire(key, timeout, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges expireAt(Instant expireAt) { + return operations.expireAt(key, expireAt, getHashKeys()); + } + + @Nullable + @Override + public ExpireChanges persist() { + return operations.persist(key, getHashKeys()); + } + + @Nullable + @Override + public Expirations getTimeToLive() { + return operations.getTimeToLive(key, getHashKeys()); + } + + @Nullable + @Override + public Expirations getTimeToLive(TimeUnit timeUnit) { + return operations.getTimeToLive(key, timeUnit, getHashKeys()); + } + + private Collection getHashKeys() { + + Collection hks = hashFields.get(); + + Assert.state(hks != null, "Hash keys must not be null"); + return hks; + } + +} diff --git a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java index 974e20e13f..804617616f 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultHashOperations.java @@ -15,6 +15,8 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import 
java.util.Collections; import java.util.LinkedHashMap; @@ -22,9 +24,14 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.springframework.core.convert.converter.Converter; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.convert.Converters; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.data.redis.core.types.Expirations.Timeouts; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -34,6 +41,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ class DefaultHashOperations extends AbstractOperations implements HashOperations { @@ -210,6 +218,86 @@ public Boolean putIfAbsent(K key, HK hashKey, HV value) { return execute(connection -> connection.hSetNX(rawKey, rawHashKey, rawHashValue)); } + @Override + public ExpireChanges expire(K key, Duration duration, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + boolean hasMillis = TimeoutUtils.hasMillis(duration); + + List raw = execute(connection -> TimeoutUtils.hasMillis(duration) + ? connection.hashCommands().hpExpire(rawKey, duration.toMillis(), rawHashKeys) + : connection.hashCommands().hExpire(rawKey, TimeoutUtils.toSeconds(duration), rawHashKeys)); + + return raw != null ? 
ExpireChanges.of(orderedKeys, raw) : null; + } + + @Override + public ExpireChanges expireAt(K key, Instant instant, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + long millis = instant.toEpochMilli(); + + List raw = execute(connection -> TimeoutUtils.containsSplitSecond(millis) + ? connection.hashCommands().hpExpireAt(rawKey, millis, rawHashKeys) + : connection.hashCommands().hExpireAt(rawKey, instant.getEpochSecond(), rawHashKeys)); + + return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; + } + + @Override + public ExpireChanges expire(K key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + List raw = execute( + connection -> connection.hashCommands().applyExpiration(rawKey, expiration, options, rawHashKeys)); + + return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; + } + + @Override + public ExpireChanges persist(K key, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + + List raw = execute(connection -> connection.hashCommands().hPersist(rawKey, rawHashKeys)); + + return raw != null ? ExpireChanges.of(orderedKeys, raw) : null; + } + + @Override + public Expirations getTimeToLive(K key, TimeUnit timeUnit, Collection hashKeys) { + + if(timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { + throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); + } + + List orderedKeys = List.copyOf(hashKeys); + + byte[] rawKey = rawKey(key); + byte[][] rawHashKeys = rawHashKeys(orderedKeys.toArray()); + + List raw = execute( + connection -> TimeUnit.MILLISECONDS.equals(timeUnit) ? 
connection.hashCommands().hpTtl(rawKey, rawHashKeys) + : connection.hashCommands().hTtl(rawKey, timeUnit, rawHashKeys)); + + if (raw == null) { + return null; + } + + Timeouts timeouts = new Timeouts(TimeUnit.MILLISECONDS.equals(timeUnit) ? timeUnit : TimeUnit.SECONDS, raw); + return Expirations.of(timeUnit, orderedKeys, timeouts); + } + @Override public List values(K key) { diff --git a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java index c3e004c25d..540b351778 100644 --- a/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/DefaultReactiveHashOperations.java @@ -19,16 +19,25 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.reactivestreams.Publisher; import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.ReactiveHashCommands; +import org.springframework.data.redis.connection.ReactiveHashCommands.ExpireCommand; +import org.springframework.data.redis.connection.ReactiveRedisConnection.NumericResponse; import org.springframework.data.redis.connection.convert.Converters; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.data.redis.core.types.Expirations.Timeouts; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.lang.Nullable; import org.springframework.util.Assert; @@ -63,8 +72,7 @@ public Mono remove(H key, 
Object... hashKeys) { Assert.noNullElements(hashKeys, "Hash keys must not contain null elements"); return createMono(hashCommands -> Flux.fromArray(hashKeys) // - .map(hashKey -> (HK) hashKey) - .map(this::rawHashKey) // + .map(hashKey -> (HK) hashKey).map(this::rawHashKey) // .collectList() // .flatMap(hks -> hashCommands.hDel(rawKey(key), hks))); } @@ -86,8 +94,8 @@ public Mono get(H key, Object hashKey) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return createMono(hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey)) - .map(this::readHashValue)); + return createMono( + hashCommands -> hashCommands.hGet(rawKey(key), rawHashKey((HK) hashKey)).map(this::readHashValue)); } @Override @@ -109,8 +117,8 @@ public Mono increment(H key, HK hashKey, long delta) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return template.doCreateMono(connection -> connection.numberCommands() - .hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); + return template + .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); } @Override @@ -119,8 +127,8 @@ public Mono increment(H key, HK hashKey, double delta) { Assert.notNull(key, "Key must not be null"); Assert.notNull(hashKey, "Hash key must not be null"); - return template.doCreateMono(connection -> connection.numberCommands() - .hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); + return template + .doCreateMono(connection -> connection.numberCommands().hIncrBy(rawKey(key), rawHashKey(hashKey), delta)); } @Override @@ -137,8 +145,7 @@ public Mono> randomEntry(H key) { Assert.notNull(key, "Key must not be null"); - return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key))) - .map(this::deserializeHashEntry); + return createMono(hashCommands -> hashCommands.hRandFieldWithValues(rawKey(key))).map(this::deserializeHashEntry); } @Override 
@@ -235,6 +242,81 @@ public Flux> scan(H key, ScanOptions options) { .map(this::deserializeHashEntry)); } + @Override + public Mono> expire(H key, Duration timeout, Collection hashKeys) { + return expire(key, Expiration.from(timeout), FieldExpirationOptions.none(), hashKeys); + } + + @Override + public Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, + Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> { + return connection + .applyExpiration(Mono.just(ExpireCommand.expire(rawHashKeys, expiration).from(rawKey).withOptions(options))) + .map(NumericResponse::getOutput); + }).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> expireAt(H key, Instant expireAt, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> connection.hExpireAt(rawKey, expireAt, rawHashKeys)).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> persist(H key, Collection hashKeys) { + + List orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> connection.hPersist(rawKey, rawHashKeys)).collectList(); + + return raw.map(values -> ExpireChanges.of(orderedKeys, values)); + } + + @Nullable + @Override + public Mono> getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys) { + + if (timeUnit.compareTo(TimeUnit.MILLISECONDS) < 0) { + throw new IllegalArgumentException("%s precision is not supported must be >= MILLISECONDS".formatted(timeUnit)); + } + + List 
orderedKeys = List.copyOf(hashKeys); + ByteBuffer rawKey = rawKey(key); + List rawHashKeys = orderedKeys.stream().map(this::rawHashKey).toList(); + + Mono> raw = createFlux(connection -> { + + if (TimeUnit.MILLISECONDS.equals(timeUnit)) { + return connection.hpTtl(rawKey, rawHashKeys); + } + return connection.hTtl(rawKey, rawHashKeys); + }).collectList(); + + return raw.map(values -> { + + Timeouts timeouts = new Timeouts(TimeUnit.MILLISECONDS.equals(timeUnit) ? timeUnit : TimeUnit.SECONDS, values); + return Expirations.of(timeUnit, orderedKeys, timeouts); + }); + } + @Override public Mono delete(H key) { diff --git a/src/main/java/org/springframework/data/redis/core/ExpireChanges.java b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java new file mode 100644 index 0000000000..029922d96e --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/ExpireChanges.java @@ -0,0 +1,199 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.redis.core; + +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.springframework.util.Assert; +import org.springframework.util.CollectionUtils; + +/** + * Value Object linking a number of keys to their {@link ExpiryChangeState} retaining the order of the original source. + * Dedicated higher level methods interpret raw values retrieved from a Redis Client. + *
+ * <ul>
+ * <li>{@link #ok()} returns keys for which the time to live has been set</li>
+ * <li>{@link #expired()} returns keys that have been expired</li>
+ * <li>{@link #missed()} returns keys for which the time to live could not be set because they do not exist</li>
+ * <li>{@link #skipped()} returns keys for which the time to live has not been set because a precondition was not
+ * met</li>
+ * </ul>
+ * + * @author Christoph Strobl + * @since 3.5 + */ +public class ExpireChanges { + + private final Map changes; + + ExpireChanges(Map changes) { + this.changes = changes; + } + + /** + * Factory Method to create {@link ExpireChanges} from raw sources. + * + * @param fields the fields to associated with the raw values in states. Defines the actual order of entries within + * {@link ExpireChanges}. + * @param states the raw Redis state change values. + * @return new instance of {@link ExpireChanges}. + * @param the key type used + */ + public static ExpireChanges of(List fields, List states) { + + Assert.isTrue(fields.size() == states.size(), "Keys and States must have the same number of elements"); + + if (fields.size() == 1) { + return new ExpireChanges<>(Map.of(fields.iterator().next(), stateFromValue(states.iterator().next()))); + } + + Map target = CollectionUtils.newLinkedHashMap(fields.size()); + for (int i = 0; i < fields.size(); i++) { + target.put(fields.get(i), stateFromValue(states.get(i))); + } + return new ExpireChanges<>(Collections.unmodifiableMap(target)); + } + + /** + * @return an ordered {@link List} of the status changes. + */ + public List stateChanges() { + return List.copyOf(changes.values()); + } + + /** + * @return the status change for the given {@literal key}, or {@literal null} if {@link ExpiryChangeState} does not + * contain an entry for it. + */ + public ExpiryChangeState stateOf(K key) { + return changes.get(key); + } + + /** + * @return {@literal true} if all changes are {@link ExpiryChangeState#OK}. + */ + public boolean allOk() { + return allMach(ExpiryChangeState.OK::equals); + } + + /** + * @return {@literal true} if all changes are either ok {@link ExpiryChangeState#OK} or + * {@link ExpiryChangeState#EXPIRED}. 
+ */ + public boolean allChanged() { + return allMach(it -> ExpiryChangeState.OK.equals(it) || ExpiryChangeState.EXPIRED.equals(it)); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#OK}. + */ + public Set ok() { + return filterByState(ExpiryChangeState.OK); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#EXPIRED}. + */ + public Set expired() { + return filterByState(ExpiryChangeState.EXPIRED); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#DOES_NOT_EXIST}. + */ + public Set missed() { + return filterByState(ExpiryChangeState.DOES_NOT_EXIST); + } + + /** + * @return an ordered list of if all changes are {@link ExpiryChangeState#CONDITION_NOT_MET}. + */ + public Set skipped() { + return filterByState(ExpiryChangeState.CONDITION_NOT_MET); + } + + public boolean allMach(Predicate predicate) { + return changes.values().stream().allMatch(predicate); + } + + private Set filterByState(ExpiryChangeState filter) { + return changes.entrySet().stream().filter(entry -> entry.getValue().equals(filter)).map(Map.Entry::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + private static ExpiryChangeState stateFromValue(Number value) { + return ExpiryChangeState.of(value); + } + + public record ExpiryChangeState(long value) { + + public static final ExpiryChangeState DOES_NOT_EXIST = new ExpiryChangeState(-2L); + public static final ExpiryChangeState CONDITION_NOT_MET = new ExpiryChangeState(0L); + public static final ExpiryChangeState OK = new ExpiryChangeState(1L); + public static final ExpiryChangeState EXPIRED = new ExpiryChangeState(2L); + + static ExpiryChangeState of(Number value) { + return switch (value.intValue()) { + case -2 -> DOES_NOT_EXIST; + case 0 -> CONDITION_NOT_MET; + case 1 -> OK; + case 2 -> EXPIRED; + default -> new ExpiryChangeState(value.longValue()); + }; + } + + public boolean isOk() { + return OK.equals(this); + } + + 
public boolean isExpired() { + return EXPIRED.equals(this); + } + + public boolean isMissing() { + return DOES_NOT_EXIST.equals(this); + } + + public boolean isSkipped() { + return CONDITION_NOT_MET.equals(this); + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + + if (!(o instanceof ExpiryChangeState that)) { + return false; + } + + return this.value == that.value; + } + + @Override + public int hashCode() { + return Objects.hash(value); + } + } +} diff --git a/src/main/java/org/springframework/data/redis/core/HashOperations.java b/src/main/java/org/springframework/data/redis/core/HashOperations.java index 8a2c6641ad..f57143c737 100644 --- a/src/main/java/org/springframework/data/redis/core/HashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/HashOperations.java @@ -15,11 +15,18 @@ */ package org.springframework.data.redis.core; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; import org.springframework.lang.Nullable; /** @@ -28,6 +35,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Ninad Divadkar + * @author Tihomir Mateev */ public interface HashOperations { @@ -90,7 +98,7 @@ public interface HashOperations { Double increment(H key, HK hashKey, double delta); /** - * Return a random hash key (aka field) from the hash stored at {@code key}. + * Return a random hash key from the hash stored at {@code key}. * * @param key must not be {@literal null}. * @return {@literal null} if key does not exist or when used in pipeline / transaction. 
@@ -112,10 +120,10 @@ public interface HashOperations { Map.Entry randomEntry(H key); /** - * Return random hash keys (aka fields) from the hash stored at {@code key}. If the provided {@code count} argument is - * positive, return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same hash key multiple times. In this case, - * the number of returned fields is the absolute value of the specified count. + * Return random hash keys from the hash stored at {@code key}. If the provided {@code count} argument is positive, + * return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is negative, + * the behavior changes and the command is allowed to return the same hash key multiple times. In this case, the + * number of returned fields is the absolute value of the specified count. * * @param key must not be {@literal null}. * @param count number of fields to return. @@ -221,8 +229,127 @@ public interface HashOperations { */ Cursor> scan(H key, ScanOptions options); + /** + * Set time to live for given {@code hashKey} . + * + * @param key must not be {@literal null}. + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expire(H key, Duration timeout, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKeys} as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return changes to the hash fields. 
{@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expireAt(H key, Instant expireAt, Collection hashKeys); + + /** + * Apply the expiration for given {@code hashKeys} as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expiration must not be {@literal null}. + * @param options must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + ExpireChanges expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKeys} . + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return changes to the hash fields. {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + ExpireChanges persist(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKeys} in seconds. + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return the actual expirations in seconds for the hash fields. {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + default Expirations getTimeToLive(H key, Collection hashKeys) { + return getTimeToLive(key, TimeUnit.SECONDS, hashKeys); + } + + /** + * Get the time to live for {@code hashKeys} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. 
+ * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return the actual expirations for the hash fields. {@literal null} when used in pipeline / transaction. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Expirations getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys); + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration operation. + * + * @param key must not be {@literal null}. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key) { + return new DefaultBoundHashFieldExpirationOperations<>(this, key, () -> keys(key)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key, HK... hashFields) { + return expiration(key, Arrays.asList(hashFields)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(H key, Collection hashFields) { + return new DefaultBoundHashFieldExpirationOperations<>(this, key, () -> hashFields); + } + /** * @return never {@literal null}. 
*/ RedisOperations getOperations(); + } diff --git a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java index 2151590ecc..2fe3b0c65b 100644 --- a/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java +++ b/src/main/java/org/springframework/data/redis/core/ReactiveHashOperations.java @@ -19,9 +19,17 @@ import reactor.core.publisher.Mono; import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; +import org.springframework.data.redis.core.types.Expiration; +import org.springframework.data.redis.core.types.Expirations; +import org.springframework.lang.Nullable; /** * Reactive Redis operations for Hash Commands. @@ -95,7 +103,7 @@ public interface ReactiveHashOperations { Mono increment(H key, HK hashKey, double delta); /** - * Return a random hash key (aka field) from the hash stored at {@code key}. + * Return a random hash key from the hash stored at {@code key}. * * @param key must not be {@literal null}. * @return @@ -115,10 +123,10 @@ public interface ReactiveHashOperations { Mono> randomEntry(H key); /** - * Return random hash keys (aka fields) from the hash stored at {@code key}. If the provided {@code count} argument is - * positive, return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is - * negative, the behavior changes and the command is allowed to return the same hash key multiple times. In this case, - * the number of returned fields is the absolute value of the specified count. + * Return random hash keys from the hash stored at {@code key}. 
If the provided {@code count} argument is positive, + * return a list of distinct hash keys, capped either at {@code count} or the hash size. If {@code count} is negative, + * the behavior changes and the command is allowed to return the same hash key multiple times. In this case, the + * number of returned fields is the absolute value of the specified count. * * @param key must not be {@literal null}. * @param count number of fields to return. @@ -230,10 +238,91 @@ default Flux> scan(H key) { */ Flux> scan(H key, ScanOptions options); + /** + * Set time to live for given {@literal hashKeys} stored within {@literal key}. + * + * @param key must not be {@literal null}. + * @param timeout the amount of time after which the key will be expired, must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + Mono> expire(H key, Duration timeout, Collection hashKeys); + + /** + * Set time to live for given {@literal hashKeys} stored within {@literal key}. + * + * @param key must not be {@literal null}. + * @param expiration must not be {@literal null}. + * @param options additional options to apply. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. + * @throws IllegalArgumentException if the timeout is {@literal null}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + Mono> expire(H key, Expiration expiration, FieldExpirationOptions options, Collection hashKeys); + + /** + * Set the expiration for given {@code hashKey} as a {@literal date} timestamp. + * + * @param key must not be {@literal null}. + * @param expireAt must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. 
+ * @throws IllegalArgumentException if the instant is {@literal null} or too large to represent as a {@code Date}. + * @see Redis Documentation: HEXPIRE + * @since 3.5 + */ + @Nullable + Mono> expireAt(H key, Instant expireAt, Collection hashKeys); + + /** + * Remove the expiration from given {@code hashKey} . + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting changes to the hash fields. + * @see Redis Documentation: HPERSIST + * @since 3.5 + */ + @Nullable + Mono> persist(H key, Collection hashKeys); + + /** + * Get the time to live for {@code hashKey} in seconds. + * + * @param key must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting {@link Expirations} of the hash fields. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + default Mono> getTimeToLive(H key, Collection hashKeys) { + return getTimeToLive(key, TimeUnit.SECONDS, hashKeys); + } + + /** + * Get the time to live for {@code hashKey} and convert it to the given {@link TimeUnit}. + * + * @param key must not be {@literal null}. + * @param timeUnit must not be {@literal null}. + * @param hashKeys must not be {@literal null}. + * @return a {@link Mono} emitting {@link Expirations} of the hash fields. + * @see Redis Documentation: HTTL + * @since 3.5 + */ + @Nullable + Mono> getTimeToLive(H key, TimeUnit timeUnit, Collection hashKeys); + /** * Removes the given {@literal key}. * * @param key must not be {@literal null}. 
*/ Mono delete(H key); + } diff --git a/src/main/java/org/springframework/data/redis/core/RedisCommand.java b/src/main/java/org/springframework/data/redis/core/RedisCommand.java index e9303233df..53dc940a97 100644 --- a/src/main/java/org/springframework/data/redis/core/RedisCommand.java +++ b/src/main/java/org/springframework/data/redis/core/RedisCommand.java @@ -107,6 +107,13 @@ public enum RedisCommand { HSET("w", 3, 3), // HSETNX("w", 3, 3), // HVALS("r", 1, 1), // + HEXPIRE("w", 5), // + HEXPIREAT("w", 5), // + HPEXPIRE("w", 5), // + HPEXPIREAT("w", 5), // + HPERSIST("w", 4), // + HTTL("r", 4), // + HPTTL("r", 4), // // -- I INCR("rw", 1), // INCRBYFLOAT("rw", 2, 2), // diff --git a/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java b/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java index 86d90ca882..c46e8478d6 100644 --- a/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java +++ b/src/main/java/org/springframework/data/redis/core/TimeoutUtils.java @@ -35,7 +35,11 @@ public abstract class TimeoutUtils { * @since 2.1 */ public static boolean hasMillis(Duration duration) { - return duration.toMillis() % 1000 != 0; + return containsSplitSecond(duration.toMillis()); + } + + public static boolean containsSplitSecond(long millis) { + return millis % 1000 != 0; } /** diff --git a/src/main/java/org/springframework/data/redis/core/types/Expiration.java b/src/main/java/org/springframework/data/redis/core/types/Expiration.java index 74eb9c3838..a68dd516b8 100644 --- a/src/main/java/org/springframework/data/redis/core/types/Expiration.java +++ b/src/main/java/org/springframework/data/redis/core/types/Expiration.java @@ -19,6 +19,7 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; +import org.springframework.data.redis.core.TimeoutUtils; import org.springframework.lang.Nullable; import org.springframework.util.Assert; import org.springframework.util.ObjectUtils; @@ -105,8 +106,8 @@ public static Expiration 
from(Duration duration) { Assert.notNull(duration, "Duration must not be null"); return duration.isZero() ? Expiration.persistent() - : duration.toMillis() % 1000 == 0 ? new Expiration(duration.getSeconds(), TimeUnit.SECONDS) - : new Expiration(duration.toMillis(), TimeUnit.MILLISECONDS); + : TimeoutUtils.hasMillis(duration) ? new Expiration(duration.toMillis(), TimeUnit.MILLISECONDS) + : new Expiration(duration.getSeconds(), TimeUnit.SECONDS); } /** diff --git a/src/main/java/org/springframework/data/redis/core/types/Expirations.java b/src/main/java/org/springframework/data/redis/core/types/Expirations.java new file mode 100644 index 0000000000..7101cb60fa --- /dev/null +++ b/src/main/java/org/springframework/data/redis/core/types/Expirations.java @@ -0,0 +1,333 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core.types; + +import java.time.Duration; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.springframework.lang.Nullable; +import org.springframework.util.CollectionUtils; +import org.springframework.util.ObjectUtils; + +/** + * Value Object linking a number of keys to their {@link TimeToLive} retaining the order of the original source. 
+ * Dedicated higher level methods interpret raw expiration values retrieved from a Redis Client. + *
+ * <ol>
+ * <li>{@link #persistent()} returns keys that do not have an associated time to live</li>
+ * <li>{@link #missing()} returns keys that do not exist and therefore have no associated time to live</li>
+ * <li>{@link #ttl()} returns the ordered list of {@link TimeToLive expirations} based on the raw values</li>
+ * <li>{@link #expiring()} returns the expiring keys along with their {@link Duration time to live}</li>
+ * </ol>
+ * + * @author Christoph Strobl + * @author Mark Paluch + * @since 3.5 + */ +public class Expirations { + + private final TimeUnit unit; + private final Map expirations; + + Expirations(TimeUnit unit, Map expirations) { + this.unit = unit; + this.expirations = expirations; + } + + /** + * Factory Method to create {@link Expirations} from raw sources provided in a given {@link TimeUnit}. + * + * @param targetUnit the actual time unit of the raw timeToLive values. + * @param keys the keys to associated with the raw values in timeToLive. Defines the actual order of entries within + * {@link Expirations}. + * @param timeouts the raw Redis time to live values. + * @return new instance of {@link Expirations}. + * @param the key type used + */ + public static Expirations of(TimeUnit targetUnit, List keys, Timeouts timeouts) { + + if (keys.size() != timeouts.size()) { + throw new IllegalArgumentException( + "Keys and Timeouts must be of same size but was %s vs %s".formatted(keys.size(), timeouts.size())); + } + if (keys.size() == 1) { + return new Expirations<>(targetUnit, + Map.of(keys.iterator().next(), TimeToLive.of(timeouts.raw().iterator().next(), timeouts.timeUnit()))); + } + + Map target = CollectionUtils.newLinkedHashMap(keys.size()); + for (int i = 0; i < keys.size(); i++) { + target.put(keys.get(i), TimeToLive.of(timeouts.get(i), timeouts.timeUnit())); + } + return new Expirations<>(targetUnit, target); + } + + /** + * @return an ordered set of keys that do not have a time to live. + */ + public Set persistent() { + return filterByState(TimeToLive.PERSISTENT); + } + + /** + * @return an ordered set of keys that do not exist and therefore do not have a time to live. + */ + public Set missing() { + return filterByState(TimeToLive.MISSING); + } + + /** + * @return an ordered set of all {@link Expirations expirations} where the {@link TimeToLive#value()} is using the + * {@link TimeUnit} defined in {@link #timeUnit()}. 
+ */ + public List ttl() { + return expirations.values().stream().map(it -> it.convert(this.unit)).toList(); + } + + /** + * @return the {@link TimeUnit} for {@link TimeToLive expirations} held by this instance. + */ + public TimeUnit timeUnit() { + return unit; + } + + /** + * @return an ordered {@link List} of {@link java.util.Map.Entry entries} combining keys with their actual time to + * live. {@link TimeToLive#isMissing() Missing} and {@link TimeToLive#isPersistent() persistent} entries are + * skipped. + */ + public List> expiring() { + + return expirations.entrySet().stream().filter(it -> !it.getValue().isMissing() && !it.getValue().isPersistent()) + .map(it -> Map.entry(it.getKey(), toDuration(it.getValue()))).toList(); + } + + /** + * @return the ordered collection of keys that are associated with an expiration. + */ + public Collection keys() { + return expirations.keySet(); + } + + /** + * @param key + * @return the {@link Expirations expirations} where the {@link TimeToLive#value()} is using the {@link TimeUnit} + * defined in {@link #timeUnit()} or {@literal null} if no entry could be found. + */ + @Nullable + public TimeToLive expirationOf(K key) { + + TimeToLive timeToLive = expirations.get(key); + + if (timeToLive == null) { + return null; + } + + return timeToLive.convert(this.unit); + } + + /** + * @param key + * @return the time to live value of the requested key if it exists and the expiration is neither + * {@link TimeToLive#isMissing() missing} nor {@link TimeToLive#isPersistent() persistent}, {@literal null} + * otherwise. 
+ */ + @Nullable + public Duration ttlOf(K key) { + return toDuration(expirationOf(key)); + } + + private Set filterByState(TimeToLive filter) { + return expirations.entrySet().stream().filter(entry -> entry.getValue().equals(filter)).map(Map.Entry::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + @Nullable + static Duration toDuration(@Nullable TimeToLive timeToLive) { + + if (timeToLive == null || timeToLive.sourceUnit == null) { + return null; + } + + return Duration.of(timeToLive.raw(), timeToLive.sourceUnit.toChronoUnit()); + } + + /** + * Collection of timeouts associated with a {@link TimeUnit}. + * + * @param timeUnit + * @param raw + */ + public record Timeouts(TimeUnit timeUnit, List raw) { + + Long get(int index) { + return raw.get(index); + } + + public int size() { + return raw.size(); + } + + } + + /** + * Expiration holds time to live {@link #raw()} values as returned by a Redis Client. {@link #value()} serves the + * actual timeout in the given temporal context converting the {@link #raw()} value into a target {@link TimeUnit}. + * Dedicated methods such as {@link #isPersistent()} allow interpretation of the raw result. {@link #MISSING} and + * {@link #PERSISTENT} mark predefined states returned by Redis indicating a time to live value could not be retrieved + * due to various reasons. + */ + public static class TimeToLive { + + /** + * Predefined {@link TimeToLive} for a key that does not exist and therefore does not have a time to live. + */ + public static TimeToLive MISSING = new TimeToLive(-2L); + + /** + * Predefined {@link TimeToLive} for a key that exists but does not expire. 
+ */ + public static TimeToLive PERSISTENT = new TimeToLive(-1L); + + private final @Nullable TimeUnit sourceUnit; + private final @Nullable TimeUnit targetUnit; + private final long raw; + + TimeToLive(long value) { + this(value, null); + } + + TimeToLive(long value, @Nullable TimeUnit sourceUnit) { + this(value, sourceUnit, null); + } + + TimeToLive(long value, @Nullable TimeUnit sourceUnit, @Nullable TimeUnit targetUnit) { + this.raw = value; + this.sourceUnit = sourceUnit; + this.targetUnit = targetUnit; + } + + /** + * Factory method for creating {@link TimeToLive} instances, returning predefined ones if the value matches a known + * reserved state. + * + * @param value the TTL value. + * @param timeUnit time unit for the given value. + * @return the {@link TimeToLive} for the given raw value. + */ + public static TimeToLive of(Number value, TimeUnit timeUnit) { + + return switch (value.intValue()) { + case -2 -> MISSING; + case -1 -> PERSISTENT; + default -> new TimeToLive(value.longValue(), timeUnit); + }; + } + + /** + * The raw source value as returned by the Redis Client. + * + * @return the raw data. + */ + public long raw() { + return raw; + } + + /** + * @return the {@link #raw()} value converted into the {@link #convert(TimeUnit) requested} target {@link TimeUnit}. + */ + public long value() { + + if (sourceUnit == null || targetUnit == null) { + return raw; + } + + return targetUnit.convert(raw, sourceUnit); + } + + /** + * @param timeUnit must not be {@literal null}. + * @return the {@link TimeToLive} instance with new target {@link TimeUnit} set for obtaining the {@link #value() + * value}, or the same instance raw value cannot or must not be converted. + */ + public TimeToLive convert(TimeUnit timeUnit) { + + if (sourceUnit == null || ObjectUtils.nullSafeEquals(sourceUnit, timeUnit)) { + return this; + } + + return new TimeToLive(raw, sourceUnit, timeUnit); + } + + /** + * @return {@literal true} if key exists but does not expire. 
+ */ + public boolean isPersistent() { + return PERSISTENT.raw() == raw(); + } + + /** + * @return {@literal true} if key does not exist and therefore does not have a time to live. + */ + public boolean isMissing() { + return MISSING.raw() == raw(); + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + + if (!(o instanceof Expirations.TimeToLive that)) { + return false; + } + + if (!ObjectUtils.nullSafeEquals(this.sourceUnit, that.sourceUnit)) { + return false; + } + + if (!ObjectUtils.nullSafeEquals(this.targetUnit, that.targetUnit)) { + return false; + } + + return this.raw == that.raw; + } + + @Override + public int hashCode() { + return Objects.hash(raw); + } + + @Override + public String toString() { + + return switch ((int) raw()) { + case -2 -> "MISSING"; + case -1 -> "PERSISTENT"; + default -> "%d %s".formatted(raw(), sourceUnit); + }; + } + } + +} diff --git a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java index 9dd0274783..18c3c24e20 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/DefaultRedisMap.java @@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.Cursor; import org.springframework.data.redis.core.RedisOperations; @@ -37,6 +38,7 @@ * @author Costin Leau * @author Christoph Strobl * @author Christian Bühler + * @author Tihomir Mateev */ public class DefaultRedisMap implements RedisMap { @@ -321,6 +323,16 @@ public Cursor> scan() { return scan(ScanOptions.NONE); } + @Override + public BoundHashFieldExpirationOperations 
expiration() { + return hashOps.expiration(); + } + + @Override + public BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return hashOps.expiration(hashFields); + } + private void checkResult(@Nullable Object obj) { if (obj == null) { throw new IllegalStateException("Cannot read collection with Redis connection in pipeline/multi-exec mode"); diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java index a2eb3b8985..955942eb25 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisMap.java @@ -15,10 +15,13 @@ */ package org.springframework.data.redis.support.collections; +import java.util.Arrays; +import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentMap; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.lang.Nullable; /** @@ -26,6 +29,8 @@ * * @author Costin Leau * @author Christoph Strobl + * @author Tihomi Mateev + * @author Mark Paluch */ public interface RedisMap extends RedisStore, ConcurrentMap { @@ -64,11 +69,43 @@ public interface RedisMap extends RedisStore, ConcurrentMap { * @since 2.6 */ @Nullable - Map.Entry randomEntry(); + Map.Entry randomEntry(); /** * @since 1.4 * @return */ Iterator> scan(); + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at + * {@code key}. Operations on the expiration object obtain keys at the time of invoking any expiration operation. + * + * @return the bound operations object to perform operations on the hash field expiration. 
+ * @since 3.5 + */ + BoundHashFieldExpirationOperations expiration(); + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + default BoundHashFieldExpirationOperations expiration(K... hashFields) { + return expiration(Arrays.asList(hashFields)); + } + + /** + * Returns a bound operations object to perform operations on the hash field expiration for all hash fields at the + * bound {@code key} for the given hash fields. + * + * @param hashFields collection of hash fields to operate on. + * @return the bound operations object to perform operations on the hash field expiration. + * @since 3.5 + */ + BoundHashFieldExpirationOperations expiration(Collection hashFields); + } diff --git a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java index d02d86b6fc..c6a5f0a45d 100644 --- a/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java +++ b/src/main/java/org/springframework/data/redis/support/collections/RedisProperties.java @@ -30,6 +30,7 @@ import java.util.concurrent.TimeUnit; import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.RedisOperations; import org.springframework.lang.Nullable; @@ -302,6 +303,17 @@ public synchronized void storeToXML(OutputStream os, String comment) throws IOEx @Override public Iterator> scan() { - throw new UnsupportedOperationException(); + return (Iterator) delegate.scan(); + } + + @Override + public 
BoundHashFieldExpirationOperations expiration() { + return (BoundHashFieldExpirationOperations) delegate.expiration(); } + + @Override + public BoundHashFieldExpirationOperations expiration(Collection hashFields) { + return (BoundHashFieldExpirationOperations) delegate.expiration((Collection) hashFields); + } + } diff --git a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java index 4662437afe..573f105247 100644 --- a/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/AbstractConnectionIntegrationTests.java @@ -15,24 +15,48 @@ */ package org.springframework.data.redis.connection; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; -import static org.awaitility.Awaitility.*; -import static org.junit.jupiter.api.condition.OS.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoSearchStoreCommandArgs.*; -import static org.springframework.data.redis.core.ScanOptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static 
org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.within; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.condition.OS.MAC; +import static org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_4; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoSearchArgs; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoSearchStoreCommandArgs.newGeoSearchStoreArgs; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; import 
java.time.temporal.ChronoUnit; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; import java.util.concurrent.BlockingDeque; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; @@ -98,6 +122,7 @@ import org.springframework.data.redis.test.condition.RedisDriver; import org.springframework.data.redis.test.util.HexStringUtils; import org.springframework.data.util.Streamable; +import org.springframework.util.ObjectUtils; /** * Base test class for AbstractConnection integration tests @@ -113,6 +138,7 @@ * @author Hendrik Duerkop * @author Shyngys Sapraliyev * @author Roman Osadchuk + * @author Tihomir Mateev */ public abstract class AbstractConnectionIntegrationTests { @@ -751,6 +777,45 @@ void testExecute() { assertThat(stringSerializer.deserialize((byte[]) getResults().get(1))).isEqualTo("bar"); } + @Test // GH- + @EnabledOnCommand("HEXPIRE") + void testExecuteHashFieldExpiration() { + + actual.add(connection.hSet("foo", "bar", "field")); + actual.add(connection.execute("HTTL", "foo", "FIELDS", "1", "bar")); + actual.add(connection.execute("HEXPIRE", "foo", "100", "NX", "FIELDS", "1", "bar")); + actual.add(connection.execute("HPERSIST", "foo", "FIELDS", "1", "bar")); + actual.add(connection.execute("HTTL", "foo", "FIELDS", "1", "bar")); + + List results = getResults(); + + assertThat(deserializeList(results, 1, stringSerializer)).containsOnly(-1L); + assertThat(deserializeList(results, 2, stringSerializer)).containsOnly(1L); + assertThat(deserializeList(results, 3, stringSerializer)).containsOnly(1L); + assertThat(deserializeList(results, 4, stringSerializer)).containsOnly(-1L); + } + + List deserializeList(List objects, int index, 
RedisSerializer serializer) { + + List result = new ArrayList<>(); + Object o = objects.get(index); + if (o instanceof List ls) { + for (Object obj : ls) { + + if (obj instanceof byte[]) { + result.add(serializer.deserialize((byte[]) obj)); + } else { + result.add(obj); + } + } + + return result; + } + + throw new IllegalArgumentException( + "Object at index " + index + " is not a list but " + ObjectUtils.nullSafeToString(o)); + } + @Test void testExecuteNoArgs() { @@ -3432,6 +3497,221 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { verifyResults(Arrays.asList(new Object[] { 0L })); } + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsSuccessAndSetsTTL() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "missking-field")); + actual.add(connection.hExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsTwoWhenZeroProvided() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsSuccessAndSetsTTL() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + 
actual.add(connection.hpExpire("hash-hexpire", 5000L, "key-2")); + actual.add(connection.hpTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5000L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 5L, "missing-field")); + actual.add(connection.hpExpire("missing-key", 5L, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPEXPIRE") + public void hpExpireReturnsTwoWhenZeroProvided() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hpExpire("hash-hexpire", 0, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsSuccessAndSetsTTL() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + actual.add(connection.hExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + 
actual.add(connection.hpExpire("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hpExpire("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIREAT") + public void hExpireAtReturnsTwoWhenZeroProvided() { + + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIREAT") + public void hpExpireAtReturnsSuccessAndSetsTTL() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + List results = getResults(); + assertThat(results.get(0)).isEqualTo(Boolean.TRUE); + assertThat((List) results.get(1)).contains(1L); + assertThat((List) results.get(2)).allSatisfy(value -> assertThat((Long) value).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIREAT") + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + actual.add(connection.hpExpireAt("hash-hexpire", inFiveSeconds, "missing-field")); + actual.add(connection.hpExpireAt("missing-key", inFiveSeconds, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPEXPIREAT") + public void hpExpireAdReturnsTwoWhenZeroProvided() { + + long fiveSecondsAgo = Instant.now().minusSeconds(5L).getEpochSecond(); + + actual.add(connection.hSet("hash-hexpire", "key-2", 
"value-2")); + actual.add(connection.hpExpireAt("hash-hexpire", fiveSecondsAgo, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsSuccessAndPersistsField() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hExpire("hash-hexpire", 5L, "key-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(1L), List.of(1L), List.of(-1L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hPersist("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HPERSIST") + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hPersist("hash-hexpire", "missing-field")); + actual.add(connection.hPersist("missing-key", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-2L), List.of(-2L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hTtl("hash-hexpire", "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test // GH-3054 + @EnabledOnCommand("HTTL") + public void hTtlReturnsMinusIndependendOfTimeUnitOneWhenFieldHasNoExpiration() { + + actual.add(connection.hSet("hash-hexpire", "key-2", "value-2")); + actual.add(connection.hTtl("hash-hexpire", TimeUnit.HOURS, "key-2")); + + verifyResults(Arrays.asList(Boolean.TRUE, List.of(-1L))); + } + + @Test // GH-3054 + 
@EnabledOnCommand("HTTL") + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + actual.add(connection.hTtl("hash-hexpire", "missing-field")); + actual.add(connection.hTtl("missing-key", "key-2")); + + verifyResults(Arrays.asList(new Object[] { List.of(-2L), List.of(-2L) })); + } + @Test // DATAREDIS-694 void touchReturnsNrOfKeysTouched() { diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java index 6fe6eb43c1..4bed88c0a5 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisClusterConnectionTests.java @@ -15,17 +15,37 @@ */ package org.springframework.data.redis.connection.jedis; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.fail; import static org.assertj.core.data.Offset.offset; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisListCommands.*; -import static org.springframework.data.redis.connection.RedisZSetCommands.*; -import static 
org.springframework.data.redis.core.ScanOptions.*; +import static org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.CLUSTER_HOST; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_2_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_3_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.REPLICAOF_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static 
org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisListCommands.Direction; +import static org.springframework.data.redis.connection.RedisListCommands.Position; +import static org.springframework.data.redis.connection.RedisZSetCommands.Range; +import static org.springframework.data.redis.core.ScanOptions.NONE; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import redis.clients.jedis.ConnectionPool; import redis.clients.jedis.HostAndPort; @@ -36,14 +56,24 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.*; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.extension.ExtendWith; - import org.springframework.dao.DataAccessException; import org.springframework.dao.InvalidDataAccessApiUsageException; import org.springframework.data.domain.Range.Bound; @@ -84,6 +114,7 @@ * @author Mark Paluch * @author Pavel Khokhlov * @author Dennis Neufeld + * @author Tihomir Mateev */ @EnabledOnRedisClusterAvailable @ExtendWith(JedisExtension.class) @@ -1038,6 +1069,183 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test 
// GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAtReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAdReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAtReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + 
assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAdReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsSuccessAndPersistsField() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); + + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + 
assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { @@ -1186,7 +1394,7 @@ public void blMoveShouldMoveElementsCorrectly() { .isEqualTo(VALUE_2_BYTES); assertThat( clusterConnection.bLMove(SAME_SLOT_KEY_1_BYTES, SAME_SLOT_KEY_2_BYTES, Direction.RIGHT, Direction.LEFT, 0.01)) - .isNull(); + .isNull(); assertThat(nativeConnection.lrange(SAME_SLOT_KEY_1, 0, -1)).isEmpty(); assertThat(nativeConnection.lrange(SAME_SLOT_KEY_2, 0, -1)).containsExactly(VALUE_2, VALUE_3); @@ -2786,13 +2994,13 @@ void bitFieldIncrByWithOverflowShouldWorkCorrectly() { assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(1L); + .containsExactly(1L); assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(2L); + .containsExactly(2L); assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L))) - .containsExactly(3L); + .containsExactly(3L); assertThat(clusterConnection.stringCommands() .bitField(JedisConverters.toBytes(KEY_1), create().incr(unsigned(2)).valueAt(BitFieldSubCommands.Offset.offset(102L)).overflow(FAIL).by(1L)) @@ -2804,7 +3012,7 @@ void bitfieldShouldAllowMultipleSubcommands() { assertThat(clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().incr(signed(5)).valueAt(BitFieldSubCommands.Offset.offset(100L)).by(1L).get(unsigned(4)).valueAt(0L))) - .containsExactly(1L, 0L); + .containsExactly(1L, 0L); } @Test // DATAREDIS-562 
@@ -2814,13 +3022,13 @@ void bitfieldShouldWorkUsingNonZeroBasedOffset() { clusterConnection.stringCommands().bitField(JedisConverters.toBytes(KEY_1), create().set(INT_8).valueAt(BitFieldSubCommands.Offset.offset(0L).multipliedByTypeLength()).to(100L) .set(INT_8).valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()).to(200L))) - .containsExactly(0L, 0L); + .containsExactly(0L, 0L); assertThat( clusterConnection.stringCommands() .bitField(JedisConverters.toBytes(KEY_1), create().get(INT_8).valueAt(BitFieldSubCommands.Offset.offset(0L).multipliedByTypeLength()).get(INT_8) - .valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()))).containsExactly(100L, - -56L); + .valueAt(BitFieldSubCommands.Offset.offset(1L).multipliedByTypeLength()))) + .containsExactly(100L, -56L); } @Test // DATAREDIS-1005 @@ -2965,7 +3173,8 @@ void shouldUseCachedTopology() { assertThat(topology).isInstanceOf(JedisClusterConnection.JedisClusterTopology.class); assertThat(provider.shouldUseCachedValue(null)).isFalse(); - assertThat(provider.shouldUseCachedValue(new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() - 101, 100))).isFalse(); + assertThat(provider.shouldUseCachedValue( + new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() - 101, 100))).isFalse(); assertThat(provider.shouldUseCachedValue( new JedisClusterConnection.JedisClusterTopology(Set.of(), System.currentTimeMillis() + 100, 100))).isTrue(); } diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java index b1fe07dae6..1d45dc739e 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceClusterConnectionTests.java @@ -15,16 +15,35 @@ */ package 
org.springframework.data.redis.connection.lettuce; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; import static org.assertj.core.data.Offset.offset; -import static org.springframework.data.redis.connection.BitFieldSubCommands.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.*; -import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.*; -import static org.springframework.data.redis.connection.ClusterTestVariables.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.*; -import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.*; -import static org.springframework.data.redis.connection.RedisZSetCommands.*; -import static org.springframework.data.redis.core.ScanOptions.*; +import static org.springframework.data.redis.connection.BitFieldSubCommands.create; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldIncrBy.Overflow.FAIL; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.INT_8; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.signed; +import static org.springframework.data.redis.connection.BitFieldSubCommands.BitFieldType.unsigned; +import static org.springframework.data.redis.connection.ClusterTestVariables.CLUSTER_HOST; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.KEY_4; +import static 
org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_2_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.MASTER_NODE_3_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.REPLICAOF_NODE_1_PORT; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.SAME_SLOT_KEY_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_1; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_2; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_3; +import static org.springframework.data.redis.connection.ClusterTestVariables.VALUE_4; +import static org.springframework.data.redis.connection.RedisGeoCommands.DistanceUnit.KILOMETERS; +import static org.springframework.data.redis.connection.RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs; +import static org.springframework.data.redis.connection.RedisZSetCommands.Range; +import static org.springframework.data.redis.core.ScanOptions.scanOptions; import io.lettuce.core.cluster.RedisClusterClient; import io.lettuce.core.cluster.api.sync.RedisAdvancedClusterCommands; @@ -32,7 +51,18 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.*; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.TimeUnit; import 
org.assertj.core.data.Offset; @@ -48,11 +78,21 @@ import org.springframework.data.geo.Distance; import org.springframework.data.geo.GeoResults; import org.springframework.data.geo.Point; -import org.springframework.data.redis.connection.*; +import org.springframework.data.redis.connection.BitFieldSubCommands; +import org.springframework.data.redis.connection.ClusterConnectionTests; +import org.springframework.data.redis.connection.ClusterSlotHashUtil; +import org.springframework.data.redis.connection.ClusterTestVariables; +import org.springframework.data.redis.connection.DataType; +import org.springframework.data.redis.connection.DefaultSortParameters; import org.springframework.data.redis.connection.Limit; +import org.springframework.data.redis.connection.RedisClusterConfiguration; +import org.springframework.data.redis.connection.RedisClusterConnection; +import org.springframework.data.redis.connection.RedisClusterNode; import org.springframework.data.redis.connection.RedisClusterNode.SlotRange; import org.springframework.data.redis.connection.RedisGeoCommands.GeoLocation; +import org.springframework.data.redis.connection.RedisListCommands; import org.springframework.data.redis.connection.RedisListCommands.Position; +import org.springframework.data.redis.connection.RedisNode; import org.springframework.data.redis.connection.RedisServerCommands.FlushOption; import org.springframework.data.redis.connection.RedisStringCommands.BitOperation; import org.springframework.data.redis.connection.RedisStringCommands.SetOption; @@ -73,6 +113,7 @@ * @author Christoph Strobl * @author Mark Paluch * @author Dennis Neufeld + * @author Tihomir Mateev */ @SuppressWarnings("deprecation") @EnabledOnRedisClusterAvailable @@ -1095,6 +1136,182 @@ public void hStrLenReturnsZeroWhenKeyDoesNotExist() { assertThat(clusterConnection.hashCommands().hStrLen(KEY_1_BYTES, KEY_1_BYTES)).isEqualTo(0L); } + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void 
hExpireReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5000L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, TimeUnit.MILLISECONDS, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isBetween(0L, 5000L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + // missing field + assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 5L, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpire(KEY_2_BYTES, 5L, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + 
assertThat(clusterConnection.hashCommands().hpExpire(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAtReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).allSatisfy(val -> assertThat(val).isBetween(0L, 5L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).getEpochSecond(); + + // missing field + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + + // missing key + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hExpireAtReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAtReturnsSuccessAndSetsTTL() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hpTtl(KEY_1_BYTES, KEY_2_BYTES)) + .allSatisfy(val -> assertThat(val).isGreaterThan(1000L).isLessThanOrEqualTo(5000L)); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAtReturnsMinusTwoWhenFieldDoesNotExist() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + long
inFiveSeconds = Instant.now().plusSeconds(5L).toEpochMilli(); + + // missing field + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, inFiveSeconds, KEY_1_BYTES)).contains(-2L); + // missing key + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_2_BYTES, inFiveSeconds, KEY_2_BYTES)).contains(-2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hpExpireAtReturnsTwoWhenZeroProvided() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hpExpireAt(KEY_1_BYTES, 0L, KEY_2_BYTES)).contains(2L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsSuccessAndPersistsField() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hExpire(KEY_1_BYTES, 5L, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(1L); + assertThat(clusterConnection.hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsMinusOneWhenFieldDoesNotHaveExpiration() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hPersistReturnsMinusTwoWhenFieldOrKeyMissing() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hPersist(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hPersist(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); + + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hTtlReturnsMinusOneWhenFieldHasNoExpiration() { + + nativeConnection.hset(KEY_1, KEY_2, VALUE_3); + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_2_BYTES)).contains(-1L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES,
TimeUnit.HOURS, KEY_2_BYTES)).contains(-1L); + } + + @Test // GH-3054 + @EnabledOnCommand("HEXPIRE") + public void hTtlReturnsMinusTwoWhenFieldOrKeyMissing() { + + assertThat(clusterConnection.hashCommands().hTtl(KEY_1_BYTES, KEY_1_BYTES)).contains(-2L); + assertThat(clusterConnection.hashCommands().hTtl(KEY_3_BYTES, KEY_2_BYTES)).contains(-2L); + } + @Test // DATAREDIS-315 public void hValsShouldRetrieveValuesCorrectly() { diff --git a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java index 973e061f90..bc1f8cc204 100644 --- a/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/connection/lettuce/LettuceReactiveHashCommandsIntegrationTests.java @@ -15,12 +15,14 @@ */ package org.springframework.data.redis.connection.lettuce; -import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; import reactor.test.StepVerifier; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -28,6 +30,7 @@ import java.util.Map; import org.springframework.data.redis.core.ScanOptions; +import org.springframework.data.redis.test.condition.EnabledOnCommand; import org.springframework.data.redis.test.extension.parametrized.ParameterizedRedisTest; /** @@ -35,6 +38,7 @@ * * @author Christoph Strobl * @author Mark Paluch + * @author Tihomir Mateev */ public class LettuceReactiveHashCommandsIntegrationTests extends LettuceReactiveCommandsTestSupport { @@ -103,8 +107,7 @@ void hMGetShouldReturnValueForFields() { nativeCommands.hset(KEY_1, FIELD_3, VALUE_3); connection.hashCommands().hMGet(KEY_1_BBUFFER, 
Arrays.asList(FIELD_1_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .consumeNextWith(actual -> { + .as(StepVerifier::create).consumeNextWith(actual -> { assertThat(actual).contains(VALUE_1_BBUFFER, VALUE_3_BBUFFER); @@ -120,13 +123,11 @@ void hMGetShouldReturnNullValueForFieldsThatHaveNoValue() { connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_1_BBUFFER)).as(StepVerifier::create) .expectNext(Collections.singletonList(VALUE_1_BBUFFER)).verifyComplete(); - connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_2_BBUFFER)) - .as(StepVerifier::create) + connection.hashCommands().hMGet(KEY_1_BBUFFER, Collections.singletonList(FIELD_2_BBUFFER)).as(StepVerifier::create) .expectNext(Collections.singletonList(null)).verifyComplete(); connection.hashCommands().hMGet(KEY_1_BBUFFER, Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .expectNext(Arrays.asList(VALUE_1_BBUFFER, null, VALUE_3_BBUFFER)).verifyComplete(); + .as(StepVerifier::create).expectNext(Arrays.asList(VALUE_1_BBUFFER, null, VALUE_3_BBUFFER)).verifyComplete(); } @ParameterizedRedisTest // DATAREDIS-525 @@ -193,8 +194,7 @@ void hDelShouldRemoveMultipleFieldsCorrectly() { nativeCommands.hset(KEY_1, FIELD_3, VALUE_3); connection.hashCommands().hDel(KEY_1_BBUFFER, Arrays.asList(FIELD_1_BBUFFER, FIELD_3_BBUFFER)) - .as(StepVerifier::create) - .expectNext(2L).verifyComplete(); + .as(StepVerifier::create).expectNext(2L).verifyComplete(); } @ParameterizedRedisTest // DATAREDIS-525 @@ -288,4 +288,52 @@ void hStrLenReturnsZeroWhenKeyDoesNotExist() { connection.hashCommands().hStrLen(KEY_1_BBUFFER, FIELD_1_BBUFFER).as(StepVerifier::create).expectNext(0L) // .verifyComplete(); } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void hExpireShouldHandleMultipleParametersCorrectly() { + + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, 
FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpire(KEY_1_BBUFFER, Duration.ofSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L).expectNext(1L).expectNext(-2L).expectComplete().verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void hExpireAtShouldHandleMultipleParametersCorrectly() { + + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hExpireAt(KEY_1_BBUFFER, Instant.now().plusSeconds(1), fields).as(StepVerifier::create) // + .expectNext(1L).expectNext(1L).expectNext(-2L).expectComplete().verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isBetween(0L, 1000L)); + assertThat(nativeCommands.httl(KEY_1, FIELD_3)).allSatisfy(it -> assertThat(it).isEqualTo(-2L)); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void hPersistShouldPersistFields() { + + assertThat(nativeCommands.hset(KEY_1, FIELD_1, VALUE_1)).isTrue(); + assertThat(nativeCommands.hset(KEY_1, FIELD_2, VALUE_2)).isTrue(); + + assertThat(nativeCommands.hexpire(KEY_1, 1000, FIELD_1)).allSatisfy(it -> assertThat(it).isEqualTo(1L)); + + final var fields = Arrays.asList(FIELD_1_BBUFFER, FIELD_2_BBUFFER, FIELD_3_BBUFFER); + + connection.hashCommands().hPersist(KEY_1_BBUFFER, fields).as(StepVerifier::create) // + 
.expectNext(1L).expectNext(-1L).expectNext(-2L).expectComplete().verify(); + + assertThat(nativeCommands.httl(KEY_1, FIELD_1, FIELD_2)).allSatisfy(it -> assertThat(it).isEqualTo(-1L)); + } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java index 8a2b7065ad..6499ae325a 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultHashOperationsIntegrationTests.java @@ -15,21 +15,30 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.assertj.core.api.InstanceOfAssertFactories; import org.junit.jupiter.api.BeforeEach; - import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.StringObjectFactory; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.connection.jedis.extension.JedisConnectionFactoryExtension; +import org.springframework.data.redis.core.ExpireChanges.ExpiryChangeState; +import org.springframework.data.redis.core.types.Expirations.TimeToLive; +import org.springframework.data.redis.test.condition.EnabledOnCommand; import 
org.springframework.data.redis.test.extension.RedisStanalone; import org.springframework.data.redis.test.extension.parametrized.MethodSource; import org.springframework.data.redis.test.extension.parametrized.ParameterizedRedisTest; @@ -39,6 +48,7 @@ * * @author Jennifer Hickey * @author Christoph Strobl + * @author Tihomir Mateev * @param Key type * @param Hash key type * @param Hash value type @@ -131,7 +141,6 @@ void testHScanReadsValuesFully() throws IOException { hashOps.put(key, key1, val1); hashOps.put(key, key2, val2); - long count = 0; try (Cursor> it = hashOps.scan(key, ScanOptions.scanOptions().count(1).build())) { @@ -202,4 +211,196 @@ void randomValue() { Map values = hashOps.randomEntries(key, 10); assertThat(values).hasSize(2).containsEntry(key1, val1).containsEntry(key2, val2); } + + @EnabledOnCommand("HEXPIRE") // GH-3054 + @ParameterizedRedisTest + void testExpireAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofMillis(500), List.of(key1))) + .satisfies(ExpireChanges::allOk); + + assertThat(redisTemplate.opsForHash().getTimeToLive(key, List.of(key1))).satisfies(expirations -> { + + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 1L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofSeconds(1)); + }); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = 
hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expire(key, Duration.ofSeconds(5), List.of(key1, key2))) + .satisfies(changes -> { + assertThat(changes.allOk()).isTrue(); + assertThat(changes.stateOf(key1)).isEqualTo(ExpiryChangeState.OK); + assertThat(changes.ok()).containsExactlyInAnyOrder(key1, key2); + assertThat(changes.missed()).isEmpty(); + assertThat(changes.stateChanges()).map(ExpiryChangeState::value).containsExactly(1L, 1L); + }); + + assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.SECONDS, List.of(key1, key2))) + .satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 5L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); + }); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testBoundExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + BoundHashOperations hashOps = redisTemplate.boundHashOps(key); + BoundHashFieldExpirationOperations exp = hashOps.expiration(key1, key2); + + assertThat(exp.expire(Duration.ofSeconds(5))).satisfies(changes -> { + assertThat(changes.allOk()).isTrue(); + assertThat(changes.stateOf(key1)).isEqualTo(ExpiryChangeState.OK); + assertThat(changes.ok()).containsExactlyInAnyOrder(key1, key2); + assertThat(changes.missed()).isEmpty(); + assertThat(changes.stateChanges()).map(ExpiryChangeState::value).containsExactly(1L, 1L); + }); + + 
assertThat(exp.getTimeToLive(TimeUnit.SECONDS)).satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.SECONDS); + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 5L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ofSeconds(1), Duration.ofSeconds(5)); + }); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireAtAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(500), List.of(key1, key2))) + .satisfies(ExpireChanges::allOk); + + assertThat(redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.MILLISECONDS, List.of(key1, key2))) + .satisfies(expirations -> { + assertThat(expirations.missing()).isEmpty(); + assertThat(expirations.timeUnit()).isEqualTo(TimeUnit.MILLISECONDS); + assertThat(expirations.expirationOf(key1)).extracting(TimeToLive::raw, InstanceOfAssertFactories.LONG) + .isBetween(0L, 500L); + assertThat(expirations.ttlOf(key1)).isBetween(Duration.ZERO, Duration.ofMillis(500)); + }); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void expireThrowsErrorOfNanoPrecision() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + + assertThatExceptionOfType(IllegalArgumentException.class) + .isThrownBy(() -> redisTemplate.opsForHash().getTimeToLive(key, TimeUnit.NANOSECONDS, List.of(key1))); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireWithOptionsNone() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + 
HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + ExpireChanges expire = redisTemplate.opsForHash().expire(key, + org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), List.of(key1)); + + assertThat(expire.allOk()).isTrue(); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireWithOptions() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), + FieldExpirationOptions.none(), List.of(key1)); + redisTemplate.opsForHash().expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), + FieldExpirationOptions.none(), List.of(key2)); + + ExpireChanges changes = redisTemplate.opsForHash().expire(key, + org.springframework.data.redis.core.types.Expiration.seconds(30), FieldExpirationOptions.builder().gt().build(), + List.of(key1, key2)); + + assertThat(changes.ok()).containsExactly(key1); + assertThat(changes.skipped()).containsExactly(key2); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testPersistAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + hashOps.put(key, key1, val1); + hashOps.put(key, key2, val2); + + assertThat(redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(800), List.of(key1, key2))) + .satisfies(ExpireChanges::allOk); + + assertThat(redisTemplate.opsForHash().persist(key, List.of(key2))).satisfies(ExpireChanges::allOk); + + 
assertThat(redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, key2))).satisfies(expirations -> { + assertThat(expirations.expirationOf(key1).isPersistent()).isFalse(); + assertThat(expirations.expirationOf(key2).isPersistent()).isTrue(); + }); + } } diff --git a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java index dd1e1287ef..48532b2feb 100644 --- a/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/core/DefaultReactiveHashOperationsIntegrationTests.java @@ -15,28 +15,32 @@ */ package org.springframework.data.redis.core; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; -import static org.junit.jupiter.api.condition.OS.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.junit.jupiter.api.condition.OS.MAC; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.springframework.data.redis.connection.convert.Converters; import reactor.test.StepVerifier; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.BeforeEach; - +import org.junit.jupiter.api.condition.DisabledOnOs; import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.SettingsUtils; import org.springframework.data.redis.StringObjectFactory; +import org.springframework.data.redis.connection.Hash.FieldExpirationOptions; import org.springframework.data.redis.connection.RedisConnection; import 
org.springframework.data.redis.connection.RedisConnectionFactory; +import org.springframework.data.redis.connection.convert.Converters; import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory; import org.springframework.data.redis.serializer.RedisSerializationContext; import org.springframework.data.redis.serializer.StringRedisSerializer; @@ -501,6 +505,148 @@ void scan() { .verifyComplete(); } + @EnabledOnCommand("HEXPIRE") // GH-3054 + @ParameterizedRedisTest + void testExpireAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + hashOperations.expire(key, Duration.ofMillis(1500), List.of(key1)) // + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations.getTimeToLive(key, List.of(key1)) // + .as(StepVerifier::create) // + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); + }).verifyComplete(); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireWithOptions() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(20), FieldExpirationOptions.none(), + List.of(key1)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(60), FieldExpirationOptions.none(), + List.of(key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + 
assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations + .expire(key, org.springframework.data.redis.core.types.Expiration.seconds(30), + FieldExpirationOptions.builder().gt().build(), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.ok()).containsExactly(key1); + assertThat(changes.skipped()).containsExactly(key2); + }).verifyComplete(); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireAndGetExpireSeconds() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + hashOperations.expire(key, Duration.ofSeconds(5), List.of(key1, key2)) // + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + hashOperations.getTimeToLive(key, TimeUnit.SECONDS, List.of(key1, key2)) // + .as(StepVerifier::create) // + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 5L); + assertThat(it.expirationOf(key2).raw()).isBetween(0L, 5L); + }).verifyComplete(); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireAtAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(1500), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(it -> { + assertThat(it.expirationOf(key1).raw()).isBetween(0L, 2L); + 
assertThat(it.expirationOf(key2).raw()).isBetween(0L, 2L); + }).verifyComplete(); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testPersistAndGetExpireMillis() { + + K key = keyFactory.instance(); + HK key1 = hashKeyFactory.instance(); + HV val1 = hashValueFactory.instance(); + HK key2 = hashKeyFactory.instance(); + HV val2 = hashValueFactory.instance(); + + putAll(key, key1, val1, key2, val2); + + redisTemplate.opsForHash().expireAt(key, Instant.now().plusMillis(1500), List.of(key1, key2)) + .as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().persist(key, List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(changes -> { + assertThat(changes.allOk()).isTrue(); + }).verifyComplete(); + + redisTemplate.opsForHash().getTimeToLive(key, List.of(key1, key2)).as(StepVerifier::create)// + .assertNext(expirations -> { + assertThat(expirations.persistent()).contains(key1, key2); + }).verifyComplete(); + } + @ParameterizedRedisTest // DATAREDIS-602 void delete() { diff --git a/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java new file mode 100644 index 0000000000..db5ba98605 --- /dev/null +++ b/src/test/java/org/springframework/data/redis/core/types/ExpirationsUnitTest.java @@ -0,0 +1,105 @@ +/* + * Copyright 2025 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.core.types; + +import static org.assertj.core.api.Assertions.*; + +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import org.springframework.data.redis.core.types.Expirations.Timeouts; + +/** + * Unit test for {@link Expirations} + * + * @author Christoph Strobl + * @author Mark Paluch + */ +class ExpirationsUnitTest { + + static final String KEY_1 = "key-1"; + static final String KEY_2 = "key-2"; + static final String KEY_3 = "key-3"; + + @ParameterizedTest // GH-3054 + @EnumSource(TimeUnit.class) + void expirationMemorizesSourceUnit(TimeUnit targetUnit) { + + Expirations exp = Expirations.of(targetUnit, List.of(KEY_1), new Timeouts(TimeUnit.SECONDS, List.of(120L))); + + assertThat(exp.ttl().get(0)).satisfies(expiration -> { + assertThat(expiration.raw()).isEqualTo(120L); + assertThat(expiration.value()).isEqualTo(targetUnit.convert(120, TimeUnit.SECONDS)); + }); + } + + @Test // GH-3054 + void expirationsCategorizesElements() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.persistent()).containsExactly(KEY_2); + assertThat(exp.missing()).containsExactly(KEY_1); + assertThat(exp.expiring()).containsExactly(Map.entry(KEY_3, Duration.ofMinutes(2))); + } + + @Test // GH-3054 + void returnsNullForMissingElements() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.expirationOf("missing")).isNull(); + assertThat(exp.ttlOf("missing")).isNull(); + } + + @Test // GH-3054 + void ttlReturnsDurationForEntriesWithTimeout() { + + Expirations exp = 
createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.ttlOf(KEY_3)).isEqualTo(Duration.ofMinutes(2)); + } + + @Test // GH-3054 + void ttlReturnsNullForPersistentAndMissingEntries() { + + Expirations exp = createExpirations(new Timeouts(TimeUnit.SECONDS, List.of(-2L, -1L, 120L))); + + assertThat(exp.ttlOf(KEY_1)).isNull(); + assertThat(exp.ttlOf(KEY_2)).isNull(); + } + + @Test // GH-3054 + void shouldRenderToString() { + + assertThat(Expirations.TimeToLive.PERSISTENT).hasToString("PERSISTENT"); + assertThat(Expirations.TimeToLive.MISSING).hasToString("MISSING"); + assertThat(Expirations.TimeToLive.of(1, TimeUnit.SECONDS)).hasToString("1 SECONDS"); + } + + static Expirations createExpirations(Timeouts timeouts) { + + List keys = IntStream.range(1, timeouts.raw().size() + 1).mapToObj("key-%s"::formatted).toList(); + return Expirations.of(timeouts.timeUnit(), keys, timeouts); + } +} diff --git a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java index 435d5b4500..31e93d06ac 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/AbstractRedisMapIntegrationTests.java @@ -15,19 +15,24 @@ */ package org.springframework.data.redis.support.collections; -import static org.assertj.core.api.Assertions.*; -import static org.assertj.core.api.Assumptions.*; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.text.DecimalFormat; +import java.time.Duration; +import java.time.Instant; import java.util.AbstractMap; import
java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assumptions; import org.junit.jupiter.api.BeforeEach; @@ -37,7 +42,9 @@ import org.springframework.data.redis.ObjectFactory; import org.springframework.data.redis.RawObjectFactory; import org.springframework.data.redis.RedisSystemException; +import org.springframework.data.redis.core.BoundHashFieldExpirationOperations; import org.springframework.data.redis.core.Cursor; +import org.springframework.data.redis.core.ExpireChanges; import org.springframework.data.redis.core.RedisCallback; import org.springframework.data.redis.core.RedisOperations; import org.springframework.data.redis.core.RedisTemplate; @@ -190,6 +197,44 @@ void testIncrement() { assertThat(map.increment(k1, 10)).isEqualTo(Long.valueOf(Long.valueOf((String) v1) + 10)); } + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpire() { + + K k1 = getKey(); + V v1 = getValue(); + assertThat(map.put(k1, v1)).isEqualTo(null); + + BoundHashFieldExpirationOperations ops = map.expiration(Collections.singletonList(k1)); + assertThat(ops.expire(Duration.ofSeconds(5))).satisfies(ExpireChanges::allOk); + assertThat(ops.getTimeToLive()).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); + }); + assertThat(ops.getTimeToLive(TimeUnit.MILLISECONDS)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); + }); + assertThat(ops.persist()).satisfies(ExpireChanges::allOk); + } + + @ParameterizedRedisTest // GH-3054 + @EnabledOnCommand("HEXPIRE") + void testExpireAt() { + + K k1 = getKey(); + V v1 = getValue(); + assertThat(map.put(k1, v1)).isEqualTo(null); + + BoundHashFieldExpirationOperations ops = 
map.expiration(Collections.singletonList(k1)); + assertThat(ops.expireAt(Instant.now().plusSeconds(5))).satisfies(ExpireChanges::allOk); + assertThat(ops.getTimeToLive()).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1L, 5L); + }); + assertThat(ops.getTimeToLive(TimeUnit.MILLISECONDS)).satisfies(expiration -> { + assertThat(expiration.expirationOf(k1).raw()).isBetween(1000L, 5000L); + }); + assertThat(ops.persist()).satisfies(ExpireChanges::allOk); + } + @ParameterizedRedisTest void testIncrementDouble() { assumeThat(valueFactory instanceof DoubleAsStringObjectFactory).isTrue(); @@ -496,4 +541,5 @@ public void randomEntryFromHash() { assertThat(map.randomEntry()).isIn(new AbstractMap.SimpleImmutableEntry(k1, v1), new AbstractMap.SimpleImmutableEntry(k2, v2)); } + } diff --git a/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java b/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java index 17e63c3113..be3e627a28 100644 --- a/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java +++ b/src/test/java/org/springframework/data/redis/support/collections/RedisPropertiesIntegrationTests.java @@ -185,12 +185,6 @@ void testStringPropertyNames() throws Exception { assertThat(keys.contains(key3)).isTrue(); } - @ParameterizedRedisTest - @Override - public void testScanWorksCorrectly() { - assertThatExceptionOfType(UnsupportedOperationException.class).isThrownBy(() -> super.testScanWorksCorrectly()); - } - // DATAREDIS-241 public static Collection testParams() {